2024-12-09 10:55:57,788 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5fb759d6 2024-12-09 10:55:57,810 main DEBUG Took 0.018828 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 10:55:57,810 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 10:55:57,811 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 10:55:57,812 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 10:55:57,813 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 10:55:57,824 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 10:55:57,843 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,845 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 10:55:57,846 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,858 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 10:55:57,859 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,860 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 10:55:57,861 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,861 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 10:55:57,862 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,863 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 10:55:57,864 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,864 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 10:55:57,865 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,865 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 10:55:57,866 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,866 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 10:55:57,867 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,867 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 10:55:57,868 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,868 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 10:55:57,869 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,870 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 10:55:57,870 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,871 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 10:55:57,871 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,872 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 10:55:57,874 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 10:55:57,876 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 10:55:57,878 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 10:55:57,879 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
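The DEBUG lines above show Log4j2 building one LoggerConfig per package from the test jar's PropertiesConfiguration: ZooKeeper and the Hadoop metrics classes are quieted to ERROR/WARN, HBase itself runs at DEBUG, and the root logger is INFO routed to the Console appender. As an illustrative sketch only (the real levels come from the packaged log4j2.properties, not from code), the same levels could be set programmatically with Log4j2's Configurator:

```java
// Illustrative sketch only: the test run above loads these levels from the
// log4j2.properties inside hbase-logging-*-tests.jar; this shows an
// equivalent programmatic setup with Log4j2's Configurator.
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class TestLogLevelsSketch {
  public static void main(String[] args) {
    Configurator.setRootLevel(Level.INFO);                          // rootLogger: INFO -> Console
    Configurator.setLevel("org.apache.zookeeper", Level.ERROR);     // quiet ZooKeeper
    Configurator.setLevel("org.apache.hadoop", Level.WARN);         // quiet Hadoop
    Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);  // verbose HBase test logging
    Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
  }
}
```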
2024-12-09 10:55:57,881 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 10:55:57,881 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 10:55:57,894 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 10:55:57,900 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 10:55:57,903 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 10:55:57,904 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 10:55:57,904 main DEBUG createAppenders(={Console}) 2024-12-09 10:55:57,905 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5fb759d6 initialized 2024-12-09 10:55:57,907 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5fb759d6 2024-12-09 10:55:57,907 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5fb759d6 OK. 2024-12-09 10:55:57,908 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 10:55:57,909 main DEBUG OutputStream closed 2024-12-09 10:55:57,911 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 10:55:57,911 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 10:55:57,911 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@5b03b9fe OK 2024-12-09 10:55:58,195 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 10:55:58,207 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 10:55:58,224 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 10:55:58,226 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 10:55:58,228 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 10:55:58,228 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 10:55:58,236 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 10:55:58,237 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 10:55:58,238 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 10:55:58,239 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 10:55:58,240 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 10:55:58,241 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 10:55:58,241 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 10:55:58,242 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 10:55:58,242 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 10:55:58,243 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 10:55:58,243 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 10:55:58,244 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 10:55:58,248 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 10:55:58,248 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@5bf8fa12) with optional ClassLoader: null 2024-12-09 10:55:58,248 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 10:55:58,249 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@5bf8fa12] started OK. 2024-12-09T10:55:58,306 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-09 10:55:58,312 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 10:55:58,315 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09T10:55:59,228 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961 2024-12-09T10:55:59,253 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins 2024-12-09T10:55:59,254 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins 2024-12-09T10:55:59,422 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-09T10:55:59,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T10:55:59,868 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0, deleteOnExit=true 2024-12-09T10:55:59,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T10:55:59,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/test.cache.data in system properties and HBase conf 2024-12-09T10:55:59,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T10:55:59,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir in system properties and HBase conf 2024-12-09T10:55:59,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T10:55:59,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T10:55:59,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T10:56:00,000 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T10:56:00,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T10:56:00,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T10:56:00,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T10:56:00,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T10:56:00,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T10:56:00,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T10:56:00,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T10:56:00,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T10:56:00,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T10:56:00,013 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/nfs.dump.dir in system properties and HBase conf 2024-12-09T10:56:00,014 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/java.io.tmpdir in system properties and HBase conf 2024-12-09T10:56:00,014 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T10:56:00,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T10:56:00,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T10:56:01,329 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T10:56:01,466 INFO [Time-limited test {}] log.Log(170): Logging initialized @5090ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T10:56:01,593 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T10:56:01,687 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T10:56:01,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T10:56:01,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T10:56:01,771 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T10:56:01,831 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T10:56:01,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@acc92c5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,AVAILABLE} 2024-12-09T10:56:01,837 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a5226e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T10:56:02,161 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c82353a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/java.io.tmpdir/jetty-localhost-36709-hadoop-hdfs-3_4_1-tests_jar-_-any-16413438274208655452/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T10:56:02,197 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b557a24{HTTP/1.1, (http/1.1)}{localhost:36709} 2024-12-09T10:56:02,198 INFO [Time-limited test {}] server.Server(415): Started @5824ms 2024-12-09T10:56:02,769 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T10:56:02,795 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T10:56:02,797 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T10:56:02,797 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T10:56:02,797 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T10:56:02,798 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65dbe8a3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,AVAILABLE} 2024-12-09T10:56:02,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c0b65f0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T10:56:02,966 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@17559536{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/java.io.tmpdir/jetty-localhost-46253-hadoop-hdfs-3_4_1-tests_jar-_-any-2104647829777090758/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T10:56:02,967 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68913343{HTTP/1.1, (http/1.1)}{localhost:46253} 2024-12-09T10:56:02,968 INFO [Time-limited test {}] server.Server(415): Started @6593ms 2024-12-09T10:56:03,069 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T10:56:03,312 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T10:56:03,337 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T10:56:03,374 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T10:56:03,374 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T10:56:03,375 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T10:56:03,377 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@770eb78{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,AVAILABLE} 2024-12-09T10:56:03,378 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28a4d72{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T10:56:03,564 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@51d3e980{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/java.io.tmpdir/jetty-localhost-41107-hadoop-hdfs-3_4_1-tests_jar-_-any-16025242491402099827/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T10:56:03,565 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@952b611{HTTP/1.1, (http/1.1)}{localhost:41107} 2024-12-09T10:56:03,565 INFO [Time-limited test {}] server.Server(415): Started @7191ms 2024-12-09T10:56:03,572 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T10:56:03,780 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T10:56:03,799 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1/current/BP-1191631881-172.17.0.2-1733741760972/current, will proceed with Du for space computation calculation, 2024-12-09T10:56:03,802 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2/current/BP-1191631881-172.17.0.2-1733741760972/current, will proceed with Du for space computation calculation, 2024-12-09T10:56:03,807 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T10:56:03,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T10:56:03,812 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T10:56:03,812 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T10:56:03,814 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4656ace7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,AVAILABLE} 2024-12-09T10:56:03,815 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e7a55e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T10:56:03,923 WARN [Thread-111 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3/current/BP-1191631881-172.17.0.2-1733741760972/current, will proceed with Du for space computation calculation, 2024-12-09T10:56:03,930 WARN [Thread-112 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4/current/BP-1191631881-172.17.0.2-1733741760972/current, will proceed with Du for space computation calculation, 2024-12-09T10:56:03,949 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T10:56:04,003 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64abaf74{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/java.io.tmpdir/jetty-localhost-35723-hadoop-hdfs-3_4_1-tests_jar-_-any-76127910755313235/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T10:56:04,005 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@8ce124a{HTTP/1.1, (http/1.1)}{localhost:35723} 2024-12-09T10:56:04,005 INFO [Time-limited test {}] server.Server(415): Started @7631ms 2024-12-09T10:56:04,009 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T10:56:04,014 WARN [Thread-83 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T10:56:04,066 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8361bb51259a1d72 with lease ID 0xc90392095d7112c6: Processing first storage report for DS-72528082-dc25-49fc-b312-bbbc6bcb4023 from datanode DatanodeRegistration(127.0.0.1:44813, datanodeUuid=f40cb86e-bff3-43c1-9868-6a09b88b8d36, infoPort=38997, infoSecurePort=0, ipcPort=44147, storageInfo=lv=-57;cid=testClusterID;nsid=135393754;c=1733741760972) 2024-12-09T10:56:04,067 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8361bb51259a1d72 with lease ID 0xc90392095d7112c6: from storage DS-72528082-dc25-49fc-b312-bbbc6bcb4023 node DatanodeRegistration(127.0.0.1:44813, datanodeUuid=f40cb86e-bff3-43c1-9868-6a09b88b8d36, infoPort=38997, infoSecurePort=0, ipcPort=44147, storageInfo=lv=-57;cid=testClusterID;nsid=135393754;c=1733741760972), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-09T10:56:04,067 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xecc1f7276159d54b with lease ID 0xc90392095d7112c7: Processing first storage report for DS-0ea321db-fa57-4e5a-b83c-91cd41720647 from datanode DatanodeRegistration(127.0.0.1:45147, datanodeUuid=bf1ce84d-1b3c-4418-b6e9-82203d5f965c, infoPort=36613, infoSecurePort=0, ipcPort=34213, storageInfo=lv=-57;cid=testClusterID;nsid=135393754;c=1733741760972) 2024-12-09T10:56:04,067 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xecc1f7276159d54b with lease ID 0xc90392095d7112c7: from storage DS-0ea321db-fa57-4e5a-b83c-91cd41720647 node DatanodeRegistration(127.0.0.1:45147, datanodeUuid=bf1ce84d-1b3c-4418-b6e9-82203d5f965c, infoPort=36613, infoSecurePort=0, ipcPort=34213, storageInfo=lv=-57;cid=testClusterID;nsid=135393754;c=1733741760972), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T10:56:04,068 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8361bb51259a1d72 with lease ID 0xc90392095d7112c6: Processing first storage report for DS-72a63534-46c0-4341-a721-733a0aa87dcd from datanode DatanodeRegistration(127.0.0.1:44813, datanodeUuid=f40cb86e-bff3-43c1-9868-6a09b88b8d36, infoPort=38997, infoSecurePort=0, ipcPort=44147, 
storageInfo=lv=-57;cid=testClusterID;nsid=135393754;c=1733741760972) 2024-12-09T10:56:04,068 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8361bb51259a1d72 with lease ID 0xc90392095d7112c6: from storage DS-72a63534-46c0-4341-a721-733a0aa87dcd node DatanodeRegistration(127.0.0.1:44813, datanodeUuid=f40cb86e-bff3-43c1-9868-6a09b88b8d36, infoPort=38997, infoSecurePort=0, ipcPort=44147, storageInfo=lv=-57;cid=testClusterID;nsid=135393754;c=1733741760972), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T10:56:04,068 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xecc1f7276159d54b with lease ID 0xc90392095d7112c7: Processing first storage report for DS-be693a54-50ea-4e99-a920-9c16f87f78bf from datanode DatanodeRegistration(127.0.0.1:45147, datanodeUuid=bf1ce84d-1b3c-4418-b6e9-82203d5f965c, infoPort=36613, infoSecurePort=0, ipcPort=34213, storageInfo=lv=-57;cid=testClusterID;nsid=135393754;c=1733741760972) 2024-12-09T10:56:04,068 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xecc1f7276159d54b with lease ID 0xc90392095d7112c7: from storage DS-be693a54-50ea-4e99-a920-9c16f87f78bf node DatanodeRegistration(127.0.0.1:45147, datanodeUuid=bf1ce84d-1b3c-4418-b6e9-82203d5f965c, infoPort=36613, infoSecurePort=0, ipcPort=34213, storageInfo=lv=-57;cid=testClusterID;nsid=135393754;c=1733741760972), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T10:56:04,243 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data6/current/BP-1191631881-172.17.0.2-1733741760972/current, will proceed with Du for space computation calculation, 2024-12-09T10:56:04,244 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data5/current/BP-1191631881-172.17.0.2-1733741760972/current, will proceed with Du for space computation calculation, 2024-12-09T10:56:04,322 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T10:56:04,331 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfa43f74984d785a4 with lease ID 0xc90392095d7112c8: Processing first storage report for DS-cbedc1ca-5b7c-4e25-a21f-16d809984ba5 from datanode DatanodeRegistration(127.0.0.1:34611, datanodeUuid=11ec4edf-37c8-4ed3-b355-230e28c31c72, infoPort=33003, infoSecurePort=0, ipcPort=37743, storageInfo=lv=-57;cid=testClusterID;nsid=135393754;c=1733741760972) 2024-12-09T10:56:04,331 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfa43f74984d785a4 with lease ID 0xc90392095d7112c8: from storage DS-cbedc1ca-5b7c-4e25-a21f-16d809984ba5 node DatanodeRegistration(127.0.0.1:34611, datanodeUuid=11ec4edf-37c8-4ed3-b355-230e28c31c72, infoPort=33003, infoSecurePort=0, ipcPort=37743, storageInfo=lv=-57;cid=testClusterID;nsid=135393754;c=1733741760972), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T10:56:04,332 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfa43f74984d785a4 with lease ID 0xc90392095d7112c8: Processing first storage report for DS-1873d96e-446b-40c7-8482-acc1cbbaa883 from datanode DatanodeRegistration(127.0.0.1:34611, datanodeUuid=11ec4edf-37c8-4ed3-b355-230e28c31c72, infoPort=33003, infoSecurePort=0, ipcPort=37743, storageInfo=lv=-57;cid=testClusterID;nsid=135393754;c=1733741760972) 2024-12-09T10:56:04,332 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfa43f74984d785a4 with lease ID 0xc90392095d7112c8: from storage DS-1873d96e-446b-40c7-8482-acc1cbbaa883 node DatanodeRegistration(127.0.0.1:34611, datanodeUuid=11ec4edf-37c8-4ed3-b355-230e28c31c72, infoPort=33003, infoSecurePort=0, ipcPort=37743, storageInfo=lv=-57;cid=testClusterID;nsid=135393754;c=1733741760972), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T10:56:04,502 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961 2024-12-09T10:56:04,624 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/zookeeper_0, clientPort=57831, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T10:56:04,640 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57831 2024-12-09T10:56:04,666 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
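At this point HBaseTestingUtil has brought up the DFS side of the mini cluster (3 datanodes, as requested by the StartMiniClusterOption printed earlier) and a MiniZooKeeperCluster on client port 57831. A minimal sketch of the call that produces this startup, assuming the builder-style option API suggested by the log (class and method names may differ slightly between HBase branches):

```java
// Sketch only: assumes the HBaseTestingUtil / StartMiniClusterOption API
// implied by the log above; exact names vary between HBase versions.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // numMasters=1 in the logged option
        .numRegionServers(3)  // numRegionServers=3
        .numDataNodes(3)      // numDataNodes=3
        .numZkServers(1)      // numZkServers=1
        .build();
    util.startMiniCluster(option);   // starts DFS, ZooKeeper and HBase as logged above
    try {
      // ... run snapshot export tests against the mini cluster ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```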
2024-12-09T10:56:04,672 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T10:56:05,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741825_1001 (size=7) 2024-12-09T10:56:05,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741825_1001 (size=7) 2024-12-09T10:56:05,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741825_1001 (size=7) 2024-12-09T10:56:05,580 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 with version=8 2024-12-09T10:56:05,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/hbase-staging 2024-12-09T10:56:05,770 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T10:56:06,178 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3469f9ca0af3:0 server-side Connection retries=45 2024-12-09T10:56:06,191 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T10:56:06,193 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T10:56:06,200 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T10:56:06,201 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T10:56:06,201 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T10:56:06,374 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T10:56:06,476 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T10:56:06,488 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T10:56:06,495 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T10:56:06,529 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 26555 (auto-detected) 2024-12-09T10:56:06,531 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-09T10:56:06,558 INFO [Time-limited test {}] 
ipc.NettyRpcServer(191): Bind to /172.17.0.2:35815 2024-12-09T10:56:06,589 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35815 connecting to ZooKeeper ensemble=127.0.0.1:57831 2024-12-09T10:56:06,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:358150x0, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T10:56:06,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35815-0x100bd63feef0000 connected 2024-12-09T10:56:06,697 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T10:56:06,701 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T10:56:06,719 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T10:56:06,725 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6, hbase.cluster.distributed=false 2024-12-09T10:56:06,776 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T10:56:06,786 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35815 2024-12-09T10:56:06,786 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35815 2024-12-09T10:56:06,790 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35815 2024-12-09T10:56:06,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35815 2024-12-09T10:56:06,798 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35815 2024-12-09T10:56:06,938 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3469f9ca0af3:0 server-side Connection retries=45 2024-12-09T10:56:06,941 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T10:56:06,941 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T10:56:06,941 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T10:56:06,942 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T10:56:06,942 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T10:56:06,946 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T10:56:06,949 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T10:56:06,956 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39691 2024-12-09T10:56:06,960 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39691 connecting to ZooKeeper ensemble=127.0.0.1:57831 2024-12-09T10:56:06,962 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T10:56:06,967 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T10:56:06,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:396910x0, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T10:56:06,983 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39691-0x100bd63feef0001 connected 2024-12-09T10:56:06,987 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T10:56:06,993 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T10:56:07,003 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-09T10:56:07,007 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T10:56:07,016 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T10:56:07,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39691 2024-12-09T10:56:07,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39691 2024-12-09T10:56:07,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39691 2024-12-09T10:56:07,023 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39691 2024-12-09T10:56:07,023 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39691 2024-12-09T10:56:07,045 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3469f9ca0af3:0 server-side Connection retries=45 2024-12-09T10:56:07,045 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T10:56:07,045 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T10:56:07,046 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T10:56:07,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T10:56:07,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T10:56:07,046 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T10:56:07,047 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T10:56:07,049 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33293 2024-12-09T10:56:07,051 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33293 connecting to ZooKeeper ensemble=127.0.0.1:57831 2024-12-09T10:56:07,052 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T10:56:07,055 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T10:56:07,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:332930x0, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T10:56:07,069 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33293-0x100bd63feef0002 connected 2024-12-09T10:56:07,069 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T10:56:07,070 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T10:56:07,071 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-09T10:56:07,073 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T10:56:07,075 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T10:56:07,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33293 
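The Instantiated/Started lines for default.FPBQ.Fifo, priority.RWQ.Fifo, replication.FPBQ.Fifo and metaPriority.FPBQ.Fifo show each server building its RPC call queues and handler pools from configuration: only 3 default handlers here, the priority queue split into 1 write and 2 read handlers, and no scan queue. As a hedged sketch, the standard configuration keys below are the kind of knobs behind those numbers; the values shown are illustrative, not the test's actual settings:

```java
// Sketch only: standard HBase RPC tuning keys that influence the executor
// sizes logged above. The specific values here are illustrative, not the
// test's actual configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcExecutorTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3);            // default.FPBQ handlerCount=3
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.6f);  // split call queues into read/write
    conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0.0f);  // scanQueues=0, scanHandlers=0
    System.out.println(conf.getInt("hbase.regionserver.handler.count", 30));
  }
}
```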
2024-12-09T10:56:07,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33293 2024-12-09T10:56:07,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33293 2024-12-09T10:56:07,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33293 2024-12-09T10:56:07,087 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33293 2024-12-09T10:56:07,109 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3469f9ca0af3:0 server-side Connection retries=45 2024-12-09T10:56:07,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T10:56:07,110 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T10:56:07,111 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T10:56:07,111 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T10:56:07,111 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T10:56:07,111 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T10:56:07,112 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T10:56:07,114 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42349 2024-12-09T10:56:07,117 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42349 connecting to ZooKeeper ensemble=127.0.0.1:57831 2024-12-09T10:56:07,118 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T10:56:07,121 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T10:56:07,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423490x0, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T10:56:07,128 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:423490x0, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T10:56:07,128 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:42349-0x100bd63feef0003 connected 2024-12-09T10:56:07,129 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T10:56:07,129 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-09T10:56:07,130 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T10:56:07,133 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T10:56:07,135 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42349 2024-12-09T10:56:07,135 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42349 2024-12-09T10:56:07,138 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42349 2024-12-09T10:56:07,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42349 2024-12-09T10:56:07,144 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42349 2024-12-09T10:56:07,163 DEBUG [M:0;3469f9ca0af3:35815 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3469f9ca0af3:35815 2024-12-09T10:56:07,166 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3469f9ca0af3,35815,1733741765917 2024-12-09T10:56:07,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T10:56:07,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T10:56:07,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T10:56:07,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T10:56:07,182 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3469f9ca0af3,35815,1733741765917 2024-12-09T10:56:07,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T10:56:07,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T10:56:07,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:07,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:07,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T10:56:07,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:07,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:07,218 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T10:56:07,231 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3469f9ca0af3,35815,1733741765917 from backup master directory 2024-12-09T10:56:07,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3469f9ca0af3,35815,1733741765917 2024-12-09T10:56:07,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T10:56:07,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T10:56:07,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T10:56:07,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T10:56:07,237 WARN [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
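Note: the ZKWatcher entries above show watches being set on znodes that do not exist yet (/hbase/running, /hbase/master) and the NodeCreated/NodeDeleted events that later fire once the active master creates or removes them. A minimal plain-ZooKeeper sketch of that pattern, independent of HBase's ZKWatcher wrapper; the quorum string is reused from the log, everything else is illustrative.

    // exists() registers a watch even when the znode is absent; the client is later
    // notified with a NodeCreated event when some other process creates the path.
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event=" + event.getType() + " path=" + event.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57831", 30000, watcher);
        // Returns null if /hbase/master does not exist yet, but the watch is still set.
        if (zk.exists("/hbase/master", true) == null) {
          System.out.println("znode absent; waiting for NodeCreated");
        }
        Thread.sleep(10000); // keep the session open long enough to observe the event
        zk.close();
      }
    }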
2024-12-09T10:56:07,237 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3469f9ca0af3,35815,1733741765917 2024-12-09T10:56:07,241 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T10:56:07,243 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T10:56:07,325 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/hbase.id] with ID: be837b94-00f8-48cd-b1fd-571be3b11602 2024-12-09T10:56:07,325 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.tmp/hbase.id 2024-12-09T10:56:07,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741826_1002 (size=42) 2024-12-09T10:56:07,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741826_1002 (size=42) 2024-12-09T10:56:07,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741826_1002 (size=42) 2024-12-09T10:56:07,368 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.tmp/hbase.id]:[hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/hbase.id] 2024-12-09T10:56:07,443 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T10:56:07,450 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T10:56:07,480 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 28ms. 
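Note: the FSUtils entries above write hbase.id to a .tmp location and then move it to its final path. A hedged sketch of that write-to-temp-then-rename publication pattern using the Hadoop FileSystem API; the paths and the cluster-ID value here are placeholders, not the test's actual layout.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdPublishSketch {
      public static void publish(Configuration conf, String clusterId) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/hbase/.tmp/hbase.id"); // hypothetical temp location
        Path dst = new Path("/hbase/hbase.id");      // hypothetical final location
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // rename() is atomic on HDFS, so readers never see a half-written file.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("failed to move " + tmp + " to " + dst);
        }
      }
    }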
2024-12-09T10:56:07,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:07,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:07,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:07,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:07,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741827_1003 (size=196) 2024-12-09T10:56:07,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741827_1003 (size=196) 2024-12-09T10:56:07,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741827_1003 (size=196) 2024-12-09T10:56:07,545 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T10:56:07,548 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T10:56:07,571 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
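Note: the NoSuchMethodException above is an expected capability probe, not a failure: the SASL helper reflectively looks for an HDFS method and falls back to another code path when it is absent. A generic sketch of that probe-and-fallback idiom; the probed method here (String.isBlank) is only a stand-in.

    import java.lang.reflect.Method;

    public class CapabilityProbeSketch {
      static Method findOptionalMethod(Class<?> clazz, String name, Class<?>... params) {
        try {
          return clazz.getDeclaredMethod(name, params);
        } catch (NoSuchMethodException e) {
          // Dependency without this method: choose the fallback code path instead.
          return null;
        }
      }

      public static void main(String[] args) {
        Method m = findOptionalMethod(String.class, "isBlank"); // present on Java 11+
        System.out.println(m != null ? "new code path" : "fallback code path");
      }
    }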
2024-12-09T10:56:07,576 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T10:56:07,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741828_1004 (size=1189) 2024-12-09T10:56:07,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741828_1004 (size=1189) 2024-12-09T10:56:07,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741828_1004 (size=1189) 2024-12-09T10:56:07,666 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData/data/master/store 2024-12-09T10:56:07,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741829_1005 (size=34) 2024-12-09T10:56:07,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741829_1005 (size=34) 2024-12-09T10:56:07,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741829_1005 (size=34) 2024-12-09T10:56:07,717 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
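Note: the descriptor dump above lists the column families of the internal master:store region (info, proc, rs, state) with their VERSIONS, BLOOMFILTER, BLOCKSIZE and encoding attributes. For orientation, the same kind of schema expressed with the public descriptor builders; this is an illustrative equivalent for two of the families, not how MasterRegion actually constructs it, and the table name is made up.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreSchemaSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setBlocksize(8192)                                  // BLOCKSIZE => 8 KB
                .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                   // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)
                .build())
            .build();
      }
    }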
2024-12-09T10:56:07,723 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:07,725 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T10:56:07,726 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T10:56:07,726 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T10:56:07,728 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T10:56:07,728 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T10:56:07,728 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T10:56:07,731 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733741767725Disabling compacts and flushes for region at 1733741767725Disabling writes for close at 1733741767728 (+3 ms)Writing region close event to WAL at 1733741767728Closed at 1733741767728 2024-12-09T10:56:07,738 WARN [master/3469f9ca0af3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData/data/master/store/.initializing 2024-12-09T10:56:07,739 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData/WALs/3469f9ca0af3,35815,1733741765917 2024-12-09T10:56:07,753 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T10:56:07,777 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3469f9ca0af3%2C35815%2C1733741765917, suffix=, logDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData/WALs/3469f9ca0af3,35815,1733741765917, archiveDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData/oldWALs, maxLogs=10 2024-12-09T10:56:07,817 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData/WALs/3469f9ca0af3,35815,1733741765917/3469f9ca0af3%2C35815%2C1733741765917.1733741767784, exclude list is [], retry=0 2024-12-09T10:56:07,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45147,DS-0ea321db-fa57-4e5a-b83c-91cd41720647,DISK] 
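Note: the AbstractFSWAL line above reports blocksize=256 MB, rollsize=128 MB and maxLogs=10 for the master's local-region WAL. Assuming the usual property names (which may be overridden programmatically in this test), those figures correspond to settings like the following sketch; rollsize is derived as blocksize times the roll multiplier.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSizingSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = 256 MB * 0.5 = 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 10);                         // maxLogs=10
        return conf;
      }
    }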
2024-12-09T10:56:07,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34611,DS-cbedc1ca-5b7c-4e25-a21f-16d809984ba5,DISK] 2024-12-09T10:56:07,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44813,DS-72528082-dc25-49fc-b312-bbbc6bcb4023,DISK] 2024-12-09T10:56:07,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-09T10:56:07,967 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData/WALs/3469f9ca0af3,35815,1733741765917/3469f9ca0af3%2C35815%2C1733741765917.1733741767784 2024-12-09T10:56:07,980 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38997:38997),(127.0.0.1/127.0.0.1:33003:33003),(127.0.0.1/127.0.0.1:36613:36613)] 2024-12-09T10:56:07,981 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T10:56:07,981 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:07,988 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T10:56:07,990 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T10:56:08,077 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T10:56:08,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T10:56:08,128 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:08,132 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T10:56:08,133 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T10:56:08,138 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T10:56:08,139 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:08,142 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:56:08,142 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T10:56:08,146 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T10:56:08,147 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:08,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:56:08,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T10:56:08,159 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T10:56:08,159 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:08,162 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:56:08,162 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T10:56:08,172 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T10:56:08,174 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T10:56:08,185 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T10:56:08,187 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T10:56:08,193 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
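Note: FlushLargeStoresPolicy above falls back to memstore-flush-size divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the master:store descriptor. If one wanted to pin that bound on an ordinary table, it is a table-level property; a small sketch with a hypothetical table name and an arbitrary 16 MB value.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            // Property named in the log; 16 MB is an arbitrary illustration.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                      String.valueOf(16L * 1024 * 1024))
            .build();
      }
    }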
2024-12-09T10:56:08,199 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T10:56:08,207 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T10:56:08,210 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64934690, jitterRate=-0.032397717237472534}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T10:56:08,227 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733741768014Initializing all the Stores at 1733741768019 (+5 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733741768022 (+3 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741768028 (+6 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741768033 (+5 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741768033Cleaning up temporary data from old regions at 1733741768187 (+154 ms)Region opened successfully at 1733741768227 (+40 ms) 2024-12-09T10:56:08,229 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T10:56:08,286 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c013b9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3469f9ca0af3/172.17.0.2:0 2024-12-09T10:56:08,342 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-09T10:56:08,361 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T10:56:08,362 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T10:56:08,369 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T10:56:08,373 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 4 msec 2024-12-09T10:56:08,383 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 8 msec 2024-12-09T10:56:08,383 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T10:56:08,449 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T10:56:08,461 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T10:56:08,463 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T10:56:08,467 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T10:56:08,469 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T10:56:08,471 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T10:56:08,475 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T10:56:08,484 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T10:56:08,486 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T10:56:08,487 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T10:56:08,490 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T10:56:08,510 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T10:56:08,517 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T10:56:08,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T10:56:08,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T10:56:08,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T10:56:08,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T10:56:08,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:08,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:08,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:08,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:08,535 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3469f9ca0af3,35815,1733741765917, sessionid=0x100bd63feef0000, setting cluster-up flag (Was=false) 2024-12-09T10:56:08,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:08,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:08,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:08,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-09T10:56:08,563 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T10:56:08,565 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3469f9ca0af3,35815,1733741765917 2024-12-09T10:56:08,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:08,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:08,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:08,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:08,585 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T10:56:08,588 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3469f9ca0af3,35815,1733741765917 2024-12-09T10:56:08,599 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T10:56:08,647 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-09T10:56:08,655 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T10:56:08,655 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
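Note: the coprocessor lines above show AccessController loaded as a system coprocessor on the master (and, below, on each region server). Enabling it cluster-wide is normally done through the coprocessor class-list properties; a hedged sketch follows, and whether all three keys are needed depends on the deployment (this test wires its coprocessors up through its own configuration).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CoprocessorConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.set("hbase.coprocessor.master.classes", ac);        // loaded by the master
        conf.set("hbase.coprocessor.regionserver.classes", ac);  // loaded by each region server
        conf.set("hbase.coprocessor.region.classes", ac);        // loaded per region
        return conf;
      }
    }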
2024-12-09T10:56:08,657 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(746): ClusterId : be837b94-00f8-48cd-b1fd-571be3b11602 2024-12-09T10:56:08,657 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(746): ClusterId : be837b94-00f8-48cd-b1fd-571be3b11602 2024-12-09T10:56:08,658 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(746): ClusterId : be837b94-00f8-48cd-b1fd-571be3b11602 2024-12-09T10:56:08,660 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T10:56:08,660 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T10:56:08,660 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T10:56:08,667 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T10:56:08,667 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T10:56:08,668 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T10:56:08,668 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T10:56:08,668 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T10:56:08,668 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T10:56:08,672 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T10:56:08,673 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T10:56:08,673 DEBUG [RS:2;3469f9ca0af3:42349 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fdfd27a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3469f9ca0af3/172.17.0.2:0 2024-12-09T10:56:08,674 DEBUG [RS:0;3469f9ca0af3:39691 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c31c4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3469f9ca0af3/172.17.0.2:0 2024-12-09T10:56:08,673 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T10:56:08,676 DEBUG [RS:1;3469f9ca0af3:33293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4db976a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3469f9ca0af3/172.17.0.2:0 2024-12-09T10:56:08,694 DEBUG [RS:1;3469f9ca0af3:33293 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3469f9ca0af3:33293 2024-12-09T10:56:08,695 DEBUG [RS:0;3469f9ca0af3:39691 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3469f9ca0af3:39691 
2024-12-09T10:56:08,696 DEBUG [RS:2;3469f9ca0af3:42349 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;3469f9ca0af3:42349 2024-12-09T10:56:08,698 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T10:56:08,698 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T10:56:08,698 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T10:56:08,698 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T10:56:08,699 DEBUG [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-09T10:56:08,699 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T10:56:08,699 DEBUG [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-09T10:56:08,699 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T10:56:08,699 DEBUG [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-09T10:56:08,699 INFO [RS:1;3469f9ca0af3:33293 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T10:56:08,699 INFO [RS:0;3469f9ca0af3:39691 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T10:56:08,699 DEBUG [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T10:56:08,699 DEBUG [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T10:56:08,700 INFO [RS:2;3469f9ca0af3:42349 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T10:56:08,700 DEBUG [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T10:56:08,704 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(2659): reportForDuty to master=3469f9ca0af3,35815,1733741765917 with port=42349, startcode=1733741767108 2024-12-09T10:56:08,704 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(2659): reportForDuty to master=3469f9ca0af3,35815,1733741765917 with port=39691, startcode=1733741766880 2024-12-09T10:56:08,704 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(2659): reportForDuty to master=3469f9ca0af3,35815,1733741765917 with port=33293, startcode=1733741767044 2024-12-09T10:56:08,720 DEBUG [RS:0;3469f9ca0af3:39691 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T10:56:08,720 DEBUG [RS:1;3469f9ca0af3:33293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T10:56:08,720 DEBUG [RS:2;3469f9ca0af3:42349 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T10:56:08,726 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T10:56:08,743 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T10:56:08,762 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T10:56:08,781 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54575, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T10:56:08,781 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54443, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T10:56:08,782 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55603, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T10:56:08,768 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3469f9ca0af3,35815,1733741765917 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T10:56:08,790 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T10:56:08,796 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3469f9ca0af3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T10:56:08,796 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3469f9ca0af3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T10:56:08,797 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3469f9ca0af3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T10:56:08,797 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3469f9ca0af3:0, corePoolSize=5, maxPoolSize=5 2024-12-09T10:56:08,797 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3469f9ca0af3:0, corePoolSize=10, maxPoolSize=10 2024-12-09T10:56:08,797 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:08,797 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T10:56:08,797 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3469f9ca0af3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T10:56:08,798 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:08,798 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T10:56:08,808 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733741798808 2024-12-09T10:56:08,809 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T10:56:08,809 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T10:56:08,811 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T10:56:08,813 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T10:56:08,817 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T10:56:08,817 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:08,817 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T10:56:08,818 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T10:56:08,818 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T10:56:08,818 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T10:56:08,823 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:08,830 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T10:56:08,832 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T10:56:08,832 DEBUG [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-09T10:56:08,832 WARN [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-09T10:56:08,832 DEBUG [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-09T10:56:08,832 WARN [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-09T10:56:08,833 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T10:56:08,834 DEBUG [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-09T10:56:08,834 WARN [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
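Note: the hbase:meta table descriptor above is written internally by FSTableDescriptors during bootstrap. For readers who want to see how the same column-family settings map onto the public client API, here is a minimal sketch, not the code the master runs; the class name MetaDescriptorSketch and the family() helper are invented for illustration, and attributes that the log shows at their defaults (TTL, KEEP_DELETED_CELLS, COMPRESSION, BLOCKCACHE) are omitted.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaDescriptorSketch {

  // Shared settings reported for every family above; only VERSIONS and BLOCKSIZE vary.
  static ColumnFamilyDescriptor family(String name, int maxVersions, int blocksize) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setMaxVersions(maxVersions)                            // VERSIONS
        .setInMemory(true)                                      // IN_MEMORY => 'true'
        .setBlocksize(blocksize)                                // BLOCKSIZE
        .setBloomFilterType(BloomType.ROWCOL)                   // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)   // DATA_BLOCK_ENCODING
        .build();
  }

  public static void main(String[] args) throws Exception {
    TableDescriptor meta = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .setColumnFamily(family("info", 3, 8192))
        .setColumnFamily(family("ns", 3, 8192))
        .setColumnFamily(family("rep_barrier", Integer.MAX_VALUE, 65536)) // VERSIONS => 2147483647
        .setColumnFamily(family("table", 3, 8192))
        .build();
    System.out.println(meta);
  }
}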
2024-12-09T10:56:08,836 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T10:56:08,837 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T10:56:08,854 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3469f9ca0af3:0:becomeActiveMaster-HFileCleaner.large.0-1733741768839,5,FailOnTimeoutGroup] 2024-12-09T10:56:08,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741831_1007 (size=1321) 2024-12-09T10:56:08,862 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3469f9ca0af3:0:becomeActiveMaster-HFileCleaner.small.0-1733741768854,5,FailOnTimeoutGroup] 2024-12-09T10:56:08,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741831_1007 (size=1321) 2024-12-09T10:56:08,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741831_1007 (size=1321) 2024-12-09T10:56:08,862 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:08,865 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T10:56:08,866 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T10:56:08,866 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
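Note: the HMaster line above reports that reopening regions with a very high storeFileRefCount is disabled because the threshold is 0. A small sketch of the configuration knob it names; the key is quoted verbatim from the log line, while the value 3 is only an example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Any value > 0 enables the feature the log message describes.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}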
2024-12-09T10:56:08,866 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:56:08,867 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
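Note: the PEWorker line above creates the meta region {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}. The following is a hypothetical client-API sketch of how a RegionInfo with those boundaries can be described; it is not the InitMetaProcedure code, and 1588230740 is simply the well-known fixed encoded name of 'hbase:meta,,1'.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

public class MetaRegionInfoSketch {
  public static void main(String[] args) {
    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME)
        .setStartKey(HConstants.EMPTY_START_ROW)  // STARTKEY => ''
        .setEndKey(HConstants.EMPTY_END_ROW)      // ENDKEY => ''
        .setRegionId(1L)                          // the ',,1' suffix in hbase:meta,,1
        .build();
    System.out.println(regionInfo.getRegionNameAsString()
        + " encoded as " + regionInfo.getEncodedName());
  }
}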
2024-12-09T10:56:08,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741832_1008 (size=32) 2024-12-09T10:56:08,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741832_1008 (size=32) 2024-12-09T10:56:08,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741832_1008 (size=32) 2024-12-09T10:56:08,884 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:08,886 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T10:56:08,889 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T10:56:08,889 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:08,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T10:56:08,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T10:56:08,893 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T10:56:08,893 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:08,895 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T10:56:08,896 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T10:56:08,899 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T10:56:08,899 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:08,900 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T10:56:08,901 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T10:56:08,903 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T10:56:08,903 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:08,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T10:56:08,905 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 
1588230740 2024-12-09T10:56:08,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740 2024-12-09T10:56:08,907 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740 2024-12-09T10:56:08,910 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T10:56:08,910 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T10:56:08,912 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T10:56:08,915 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T10:56:08,924 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T10:56:08,925 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61477895, jitterRate=-0.08390797674655914}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T10:56:08,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733741768884Initializing all the Stores at 1733741768885 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733741768886 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733741768886Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741768886Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733741768886Cleaning up temporary data from old regions at 1733741768910 (+24 ms)Region opened successfully at 1733741768929 (+19 ms) 2024-12-09T10:56:08,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): 
Closing 1588230740, disabling compactions & flushes 2024-12-09T10:56:08,929 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T10:56:08,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T10:56:08,930 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T10:56:08,930 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T10:56:08,933 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(2659): reportForDuty to master=3469f9ca0af3,35815,1733741765917 with port=33293, startcode=1733741767044 2024-12-09T10:56:08,934 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(2659): reportForDuty to master=3469f9ca0af3,35815,1733741765917 with port=42349, startcode=1733741767108 2024-12-09T10:56:08,935 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(2659): reportForDuty to master=3469f9ca0af3,35815,1733741765917 with port=39691, startcode=1733741766880 2024-12-09T10:56:08,936 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:08,939 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T10:56:08,939 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733741768929Disabling compacts and flushes for region at 1733741768929Disabling writes for close at 1733741768930 (+1 ms)Writing region close event to WAL at 1733741768938 (+8 ms)Closed at 1733741768938 2024-12-09T10:56:08,940 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] master.ServerManager(517): Registering regionserver=3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:08,944 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T10:56:08,944 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T10:56:08,953 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3469f9ca0af3,39691,1733741766880 2024-12-09T10:56:08,953 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] master.ServerManager(517): Registering regionserver=3469f9ca0af3,39691,1733741766880 2024-12-09T10:56:08,954 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T10:56:08,954 DEBUG [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:56:08,955 DEBUG [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35869 2024-12-09T10:56:08,955 DEBUG [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T10:56:08,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, 
quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T10:56:08,964 DEBUG [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:56:08,965 DEBUG [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35869 2024-12-09T10:56:08,965 DEBUG [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T10:56:08,966 DEBUG [RS:1;3469f9ca0af3:33293 {}] zookeeper.ZKUtil(111): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:08,967 WARN [RS:1;3469f9ca0af3:33293 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T10:56:08,967 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3469f9ca0af3,42349,1733741767108 2024-12-09T10:56:08,967 INFO [RS:1;3469f9ca0af3:33293 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T10:56:08,967 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] master.ServerManager(517): Registering regionserver=3469f9ca0af3,42349,1733741767108 2024-12-09T10:56:08,967 DEBUG [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:08,967 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T10:56:08,975 DEBUG [RS:0;3469f9ca0af3:39691 {}] zookeeper.ZKUtil(111): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3469f9ca0af3,39691,1733741766880 2024-12-09T10:56:08,975 WARN [RS:0;3469f9ca0af3:39691 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
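Note: the NodeChildrenChanged events on /hbase/rs above are how the master notices the region servers' ephemeral znodes. A plain ZooKeeper client can watch the same znode; this sketch is illustrative only, with the quorum address and base znode copied from the log lines.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event type=" + event.getType() + " path=" + event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57831", 30_000, watcher);
    // Passing true registers the default watcher, so a new region server znode
    // fires a NodeChildrenChanged event like the ones logged above.
    List<String> regionServers = zk.getChildren("/hbase/rs", true);
    regionServers.forEach(System.out::println); // e.g. 3469f9ca0af3,33293,1733741767044
    Thread.sleep(5_000);
    zk.close();
  }
}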
2024-12-09T10:56:08,975 INFO [RS:0;3469f9ca0af3:39691 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T10:56:08,975 DEBUG [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,39691,1733741766880 2024-12-09T10:56:08,977 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T10:56:08,978 DEBUG [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:56:08,978 DEBUG [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35869 2024-12-09T10:56:08,978 DEBUG [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T10:56:08,979 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3469f9ca0af3,39691,1733741766880] 2024-12-09T10:56:08,979 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3469f9ca0af3,33293,1733741767044] 2024-12-09T10:56:08,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T10:56:08,985 DEBUG [RS:2;3469f9ca0af3:42349 {}] zookeeper.ZKUtil(111): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3469f9ca0af3,42349,1733741767108 2024-12-09T10:56:08,985 WARN [RS:2;3469f9ca0af3:42349 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
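Note: each region server above instantiates AsyncFSWALProvider. The WAL provider is chosen through configuration; a minimal sketch, assuming the commonly documented key "hbase.wal.provider" and value "asyncfs" (verify both against this HBase version).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "asyncfs");       // AsyncFSWALProvider, as in the log
    // conf.set("hbase.wal.provider", "filesystem"); // classic FSHLog-based provider
    System.out.println(conf.get("hbase.wal.provider"));
  }
}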
2024-12-09T10:56:08,985 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3469f9ca0af3,42349,1733741767108] 2024-12-09T10:56:08,985 INFO [RS:2;3469f9ca0af3:42349 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T10:56:08,986 DEBUG [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,42349,1733741767108 2024-12-09T10:56:09,021 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T10:56:09,021 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T10:56:09,021 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T10:56:09,056 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T10:56:09,056 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T10:56:09,058 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T10:56:09,064 INFO [RS:0;3469f9ca0af3:39691 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T10:56:09,064 INFO [RS:1;3469f9ca0af3:33293 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T10:56:09,064 INFO [RS:2;3469f9ca0af3:42349 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T10:56:09,065 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,065 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,065 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
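Note: the MemStoreFlusher (globalMemStoreLimit=880 M, low mark 836 M) and PressureAwareCompactionThroughputController (100 MB/s upper, 50 MB/s lower) lines reflect heap-fraction and throughput settings. The sketch below names the configuration keys usually behind them; the key names are assumptions from common documentation and should be checked against this HBase version before use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushAndThroughputConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The global memstore limit is a fraction of the region server heap (880 MB here);
    // the low-water mark (836 MB) is a fraction of that limit.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Compaction throughput bounds, in bytes per second (100 MB/s and 50 MB/s above).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
  }
}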
2024-12-09T10:56:09,066 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T10:56:09,070 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T10:56:09,074 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T10:56:09,075 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T10:56:09,076 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,076 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,077 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,077 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,077 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,077 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,077 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,077 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,077 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,077 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,077 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,077 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,078 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3469f9ca0af3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T10:56:09,078 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3469f9ca0af3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T10:56:09,078 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,078 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): 
Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,078 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,078 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,078 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,078 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,078 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,078 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,078 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,078 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,079 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,079 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T10:56:09,079 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,079 DEBUG [RS:0;3469f9ca0af3:39691 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3469f9ca0af3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T10:56:09,079 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T10:56:09,079 DEBUG [RS:2;3469f9ca0af3:42349 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3469f9ca0af3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T10:56:09,082 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T10:56:09,084 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T10:56:09,084 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
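Note: each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line corresponds to a small dedicated thread pool inside the region server. HBase's executor.ExecutorService is essentially a wrapper over JDK thread pools, so the idea can be sketched with plain java.util.concurrent; this is an analogy, not the HBase class itself.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class RsExecutorSketch {
  public static void main(String[] args) {
    // e.g. RS_OPEN_REGION above: corePoolSize=1, maxPoolSize=1
    ThreadPoolExecutor openRegionPool =
        new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    openRegionPool.submit(() -> System.out.println("open-region handler would run here"));
    openRegionPool.shutdown();
  }
}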
2024-12-09T10:56:09,084 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,084 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,085 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,085 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,085 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,085 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3469f9ca0af3:0, corePoolSize=2, maxPoolSize=2 2024-12-09T10:56:09,085 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,085 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,085 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,085 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,085 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,086 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3469f9ca0af3:0, corePoolSize=1, maxPoolSize=1 2024-12-09T10:56:09,086 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T10:56:09,086 DEBUG [RS:1;3469f9ca0af3:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3469f9ca0af3:0, corePoolSize=3, maxPoolSize=3 2024-12-09T10:56:09,095 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,095 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,095 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,095 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T10:56:09,095 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,095 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=3469f9ca0af3,33293,1733741767044-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T10:56:09,106 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,106 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,106 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,106 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,106 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,107 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ChoreService(168): Chore ScheduledChore name=3469f9ca0af3,42349,1733741767108-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T10:56:09,110 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,110 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,110 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,110 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,110 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,110 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ChoreService(168): Chore ScheduledChore name=3469f9ca0af3,39691,1733741766880-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T10:56:09,129 WARN [3469f9ca0af3:35815 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T10:56:09,138 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T10:56:09,141 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=3469f9ca0af3,33293,1733741767044-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,142 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
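Note: the "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines come from ChoreService.scheduleChore. A minimal sketch of that pattern, assuming the public ScheduledChore(String, Stoppable, int) constructor; the chore body is a hypothetical placeholder standing in for the real CompactionChecker.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch");
    // Period is in milliseconds, like CompactionChecker's period=1000 above.
    ScheduledChore checker = new ScheduledChore("CompactionCheckerSketch", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("would check stores for compaction work here");
      }
    };
    service.scheduleChore(checker);
    Thread.sleep(3_000);
    stopper.stop("done");
    service.shutdown();
  }
}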
2024-12-09T10:56:09,142 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.Replication(171): 3469f9ca0af3,33293,1733741767044 started 2024-12-09T10:56:09,148 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T10:56:09,148 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ChoreService(168): Chore ScheduledChore name=3469f9ca0af3,39691,1733741766880-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,148 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,148 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.Replication(171): 3469f9ca0af3,39691,1733741766880 started 2024-12-09T10:56:09,161 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T10:56:09,162 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ChoreService(168): Chore ScheduledChore name=3469f9ca0af3,42349,1733741767108-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,162 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,162 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.Replication(171): 3469f9ca0af3,42349,1733741767108 started 2024-12-09T10:56:09,172 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,173 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(1482): Serving as 3469f9ca0af3,33293,1733741767044, RpcServer on 3469f9ca0af3/172.17.0.2:33293, sessionid=0x100bd63feef0002 2024-12-09T10:56:09,174 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
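Note: at this point all three region servers are serving RPCs on 172.17.0.2 and have registered under /hbase/rs. For completeness, a hedged client-side sketch of confirming the registrations with the Admin API; connection settings mirror the mini-cluster values in the log, and Admin.getRegionServers is assumed to be available in this 3.0.0-beta-2 client.

import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RegionServerCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "57831");
    conf.set("zookeeper.znode.parent", "/hbase");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      Collection<ServerName> servers = admin.getRegionServers();
      servers.forEach(sn -> System.out.println(sn.getServerName()));
      // expected: 3469f9ca0af3,39691,... / 3469f9ca0af3,33293,... / 3469f9ca0af3,42349,...
    }
  }
}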
2024-12-09T10:56:09,174 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(1482): Serving as 3469f9ca0af3,39691,1733741766880, RpcServer on 3469f9ca0af3/172.17.0.2:39691, sessionid=0x100bd63feef0001 2024-12-09T10:56:09,174 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T10:56:09,174 DEBUG [RS:1;3469f9ca0af3:33293 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:09,174 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T10:56:09,174 DEBUG [RS:0;3469f9ca0af3:39691 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3469f9ca0af3,39691,1733741766880 2024-12-09T10:56:09,174 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3469f9ca0af3,33293,1733741767044' 2024-12-09T10:56:09,174 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3469f9ca0af3,39691,1733741766880' 2024-12-09T10:56:09,174 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T10:56:09,174 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T10:56:09,175 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T10:56:09,175 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T10:56:09,176 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T10:56:09,177 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T10:56:09,177 DEBUG [RS:1;3469f9ca0af3:33293 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:09,177 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T10:56:09,177 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T10:56:09,177 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3469f9ca0af3,33293,1733741767044' 2024-12-09T10:56:09,177 DEBUG [RS:0;3469f9ca0af3:39691 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3469f9ca0af3,39691,1733741766880 2024-12-09T10:56:09,177 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T10:56:09,177 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3469f9ca0af3,39691,1733741766880' 2024-12-09T10:56:09,177 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T10:56:09,178 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under 
znode:'/hbase/online-snapshot/acquired' 2024-12-09T10:56:09,180 DEBUG [RS:0;3469f9ca0af3:39691 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T10:56:09,180 INFO [RS:0;3469f9ca0af3:39691 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T10:56:09,180 INFO [RS:0;3469f9ca0af3:39691 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T10:56:09,188 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:09,189 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(1482): Serving as 3469f9ca0af3,42349,1733741767108, RpcServer on 3469f9ca0af3/172.17.0.2:42349, sessionid=0x100bd63feef0003 2024-12-09T10:56:09,189 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T10:56:09,190 DEBUG [RS:2;3469f9ca0af3:42349 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3469f9ca0af3,42349,1733741767108 2024-12-09T10:56:09,190 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3469f9ca0af3,42349,1733741767108' 2024-12-09T10:56:09,190 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T10:56:09,192 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T10:56:09,193 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T10:56:09,194 DEBUG [RS:1;3469f9ca0af3:33293 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T10:56:09,194 INFO [RS:1;3469f9ca0af3:33293 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T10:56:09,194 INFO [RS:1;3469f9ca0af3:33293 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
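Note: both quota managers above report "Quota support disabled". The feature is gated by a single boolean; this sketch uses the "hbase.quota.enabled" key (set it in the cluster's hbase-site.xml, not only on a client, for it to take effect).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.quota.enabled", true);
    System.out.println(conf.getBoolean("hbase.quota.enabled", false));
  }
}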
2024-12-09T10:56:09,197 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T10:56:09,198 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T10:56:09,199 DEBUG [RS:2;3469f9ca0af3:42349 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3469f9ca0af3,42349,1733741767108 2024-12-09T10:56:09,199 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3469f9ca0af3,42349,1733741767108' 2024-12-09T10:56:09,199 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T10:56:09,207 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T10:56:09,210 DEBUG [RS:2;3469f9ca0af3:42349 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T10:56:09,210 INFO [RS:2;3469f9ca0af3:42349 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T10:56:09,210 INFO [RS:2;3469f9ca0af3:42349 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T10:56:09,288 INFO [RS:0;3469f9ca0af3:39691 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T10:56:09,292 INFO [RS:0;3469f9ca0af3:39691 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3469f9ca0af3%2C39691%2C1733741766880, suffix=, logDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,39691,1733741766880, archiveDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/oldWALs, maxLogs=32 2024-12-09T10:56:09,295 INFO [RS:1;3469f9ca0af3:33293 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T10:56:09,307 INFO [RS:1;3469f9ca0af3:33293 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3469f9ca0af3%2C33293%2C1733741767044, suffix=, logDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,33293,1733741767044, archiveDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/oldWALs, maxLogs=32 2024-12-09T10:56:09,311 INFO [RS:2;3469f9ca0af3:42349 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T10:56:09,315 INFO [RS:2;3469f9ca0af3:42349 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3469f9ca0af3%2C42349%2C1733741767108, suffix=, logDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,42349,1733741767108, archiveDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/oldWALs, maxLogs=32 2024-12-09T10:56:09,328 DEBUG [RS:0;3469f9ca0af3:39691 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,39691,1733741766880/3469f9ca0af3%2C39691%2C1733741766880.1733741769298, exclude list is [], retry=0 2024-12-09T10:56:09,329 DEBUG [RS:1;3469f9ca0af3:33293 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,33293,1733741767044/3469f9ca0af3%2C33293%2C1733741767044.1733741769309, exclude list is [], retry=0 2024-12-09T10:56:09,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34611,DS-cbedc1ca-5b7c-4e25-a21f-16d809984ba5,DISK] 2024-12-09T10:56:09,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44813,DS-72528082-dc25-49fc-b312-bbbc6bcb4023,DISK] 2024-12-09T10:56:09,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45147,DS-0ea321db-fa57-4e5a-b83c-91cd41720647,DISK] 2024-12-09T10:56:09,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45147,DS-0ea321db-fa57-4e5a-b83c-91cd41720647,DISK] 2024-12-09T10:56:09,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44813,DS-72528082-dc25-49fc-b312-bbbc6bcb4023,DISK] 2024-12-09T10:56:09,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34611,DS-cbedc1ca-5b7c-4e25-a21f-16d809984ba5,DISK] 2024-12-09T10:56:09,344 DEBUG [RS:2;3469f9ca0af3:42349 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,42349,1733741767108/3469f9ca0af3%2C42349%2C1733741767108.1733741769317, exclude list is [], retry=0 2024-12-09T10:56:09,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34611,DS-cbedc1ca-5b7c-4e25-a21f-16d809984ba5,DISK] 2024-12-09T10:56:09,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45147,DS-0ea321db-fa57-4e5a-b83c-91cd41720647,DISK] 2024-12-09T10:56:09,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44813,DS-72528082-dc25-49fc-b312-bbbc6bcb4023,DISK] 2024-12-09T10:56:09,410 INFO [RS:0;3469f9ca0af3:39691 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,39691,1733741766880/3469f9ca0af3%2C39691%2C1733741766880.1733741769298 2024-12-09T10:56:09,422 INFO [RS:1;3469f9ca0af3:33293 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,33293,1733741767044/3469f9ca0af3%2C33293%2C1733741767044.1733741769309 2024-12-09T10:56:09,435 DEBUG [RS:0;3469f9ca0af3:39691 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36613:36613),(127.0.0.1/127.0.0.1:38997:38997),(127.0.0.1/127.0.0.1:33003:33003)] 2024-12-09T10:56:09,437 DEBUG [RS:1;3469f9ca0af3:33293 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36613:36613),(127.0.0.1/127.0.0.1:33003:33003),(127.0.0.1/127.0.0.1:38997:38997)] 2024-12-09T10:56:09,439 INFO [RS:2;3469f9ca0af3:42349 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,42349,1733741767108/3469f9ca0af3%2C42349%2C1733741767108.1733741769317 2024-12-09T10:56:09,446 DEBUG [RS:2;3469f9ca0af3:42349 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36613:36613),(127.0.0.1/127.0.0.1:38997:38997),(127.0.0.1/127.0.0.1:33003:33003)] 2024-12-09T10:56:09,637 DEBUG [3469f9ca0af3:35815 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T10:56:09,667 DEBUG [3469f9ca0af3:35815 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T10:56:09,702 DEBUG [3469f9ca0af3:35815 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T10:56:09,702 DEBUG [3469f9ca0af3:35815 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T10:56:09,702 DEBUG [3469f9ca0af3:35815 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T10:56:09,702 DEBUG [3469f9ca0af3:35815 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T10:56:09,702 DEBUG [3469f9ca0af3:35815 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T10:56:09,702 DEBUG [3469f9ca0af3:35815 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T10:56:09,702 INFO [3469f9ca0af3:35815 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T10:56:09,702 INFO [3469f9ca0af3:35815 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T10:56:09,702 INFO [3469f9ca0af3:35815 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T10:56:09,702 DEBUG [3469f9ca0af3:35815 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T10:56:09,715 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T10:56:09,733 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3469f9ca0af3,39691,1733741766880, state=OPENING 2024-12-09T10:56:09,740 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T10:56:09,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
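The entries above show the balancer picking a server and the master moving hbase:meta to OPENING on 3469f9ca0af3,39691 while publishing the location in ZooKeeper. A client does not parse that znode directly; a hedged sketch of asking the client API where meta currently lives, assuming connection settings come from an hbase-site.xml on the classpath:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

// Hedged sketch: resolve the current hbase:meta location through the client API,
// which consults the same registry the master updates above.
public class WhereIsMeta {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}
```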
2024-12-09T10:56:09,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:09,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:09,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:09,744 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T10:56:09,744 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T10:56:09,744 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T10:56:09,745 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T10:56:09,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T10:56:09,748 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T10:56:09,945 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T10:56:09,951 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50533, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T10:56:09,974 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T10:56:09,974 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T10:56:09,976 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T10:56:09,983 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3469f9ca0af3%2C39691%2C1733741766880.meta, suffix=.meta, logDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,39691,1733741766880, archiveDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/oldWALs, maxLogs=32 2024-12-09T10:56:10,011 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create 
output stream for /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,39691,1733741766880/3469f9ca0af3%2C39691%2C1733741766880.meta.1733741769988.meta, exclude list is [], retry=0 2024-12-09T10:56:10,015 WARN [IPC Server handler 2 on default port 35869 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T10:56:10,015 WARN [IPC Server handler 2 on default port 35869 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T10:56:10,015 WARN [IPC Server handler 2 on default port 35869 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T10:56:10,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45147,DS-0ea321db-fa57-4e5a-b83c-91cd41720647,DISK] 2024-12-09T10:56:10,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34611,DS-cbedc1ca-5b7c-4e25-a21f-16d809984ba5,DISK] 2024-12-09T10:56:10,049 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,39691,1733741766880/3469f9ca0af3%2C39691%2C1733741766880.meta.1733741769988.meta 2024-12-09T10:56:10,051 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36613:36613),(127.0.0.1/127.0.0.1:33003:33003)] 2024-12-09T10:56:10,052 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T10:56:10,055 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-09T10:56:10,057 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
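The meta region is opened with the AccessController system coprocessor registered, as the "System coprocessor ... loaded" entry above records. A hedged configuration sketch of how that coprocessor is typically wired up; the keys are the standard HBase security properties, and whether master and regionserver hooks are also needed depends on the deployment:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch: configuration keys that commonly load AccessController as a
// system coprocessor; in a real deployment these live in hbase-site.xml.
public class AccessControllerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.security.authorization", "true");
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    System.out.println("region coprocessors = " + conf.get("hbase.coprocessor.region.classes"));
  }
}
```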
2024-12-09T10:56:10,058 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T10:56:10,060 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T10:56:10,079 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T10:56:10,090 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T10:56:10,091 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:10,091 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T10:56:10,091 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T10:56:10,121 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T10:56:10,123 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T10:56:10,124 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:10,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T10:56:10,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T10:56:10,130 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T10:56:10,130 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:10,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T10:56:10,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T10:56:10,136 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T10:56:10,136 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:10,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T10:56:10,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T10:56:10,147 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T10:56:10,147 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:10,152 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T10:56:10,153 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T10:56:10,155 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740 2024-12-09T10:56:10,159 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740 2024-12-09T10:56:10,162 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T10:56:10,162 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T10:56:10,163 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
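The store openers above instantiate the info, ns, rep_barrier and table column families of hbase:meta, each with ROW_INDEX_V1 encoding and the DefaultStoreFileTracker. The same descriptors can be read back through Admin; a hedged sketch that prints a few of the settings echoed by the HStore entries:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch: list hbase:meta's column families and some per-family settings.
public class MetaFamilies {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      for (ColumnFamilyDescriptor cf :
           admin.getDescriptor(TableName.META_TABLE_NAME).getColumnFamilies()) {
        System.out.printf("%s: versions=%d, blocksize=%d, inMemory=%b, encoding=%s%n",
            cf.getNameAsString(), cf.getMaxVersions(), cf.getBlocksize(),
            cf.isInMemory(), cf.getDataBlockEncoding());
      }
    }
  }
}
```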
2024-12-09T10:56:10,168 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T10:56:10,171 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60063767, jitterRate=-0.10498012602329254}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T10:56:10,172 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T10:56:10,176 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733741770092Writing region info on filesystem at 1733741770092Initializing all the Stores at 1733741770103 (+11 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733741770103Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733741770118 (+15 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741770118Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733741770118Cleaning up temporary data from old regions at 1733741770162 (+44 ms)Running coprocessor post-open hooks at 1733741770172 (+10 ms)Region opened successfully at 1733741770176 (+4 ms) 2024-12-09T10:56:10,187 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733741769932 2024-12-09T10:56:10,205 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T10:56:10,206 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T10:56:10,208 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T10:56:10,211 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3469f9ca0af3,39691,1733741766880, state=OPEN 2024-12-09T10:56:10,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T10:56:10,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T10:56:10,217 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T10:56:10,217 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T10:56:10,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T10:56:10,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T10:56:10,218 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T10:56:10,218 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T10:56:10,222 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3469f9ca0af3,39691,1733741766880 2024-12-09T10:56:10,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T10:56:10,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3469f9ca0af3,39691,1733741766880 in 474 msec 2024-12-09T10:56:10,252 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T10:56:10,252 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T10:56:10,253 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T10:56:10,254 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.2830 sec 2024-12-09T10:56:10,290 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:56:10,292 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:56:10,337 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:10,346 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57141, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:56:10,414 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.7340 sec 2024-12-09T10:56:10,415 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733741770415, completionTime=-1 2024-12-09T10:56:10,419 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T10:56:10,419 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T10:56:10,478 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T10:56:10,478 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733741830478 2024-12-09T10:56:10,478 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733741890478 2024-12-09T10:56:10,478 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 58 msec 2024-12-09T10:56:10,480 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T10:56:10,489 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3469f9ca0af3,35815,1733741765917-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:10,490 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3469f9ca0af3,35815,1733741765917-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:10,490 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3469f9ca0af3,35815,1733741765917-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:10,492 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3469f9ca0af3:35815, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:10,492 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:10,495 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
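By this point the master has brought hbase:meta online, joined its three region servers and scheduled the cluster maintenance chores. A hedged sketch of the kind of client-side check a test harness might make before proceeding, confirming the active master and the live region server count:

```java
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch: report the active master and live region servers, mirroring the
// "Number of RegionServers=3" bookkeeping in the entries above.
public class ClusterCheck {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("active master      = " + metrics.getMasterName());
      System.out.println("live regionservers = " + metrics.getLiveServerMetrics().size());
    }
  }
}
```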
2024-12-09T10:56:10,502 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T10:56:10,538 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.300sec 2024-12-09T10:56:10,543 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T10:56:10,545 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T10:56:10,547 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T10:56:10,547 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T10:56:10,548 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T10:56:10,549 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3469f9ca0af3,35815,1733741765917-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T10:56:10,549 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3469f9ca0af3,35815,1733741765917-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T10:56:10,574 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@182b8f75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:10,580 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T10:56:10,580 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T10:56:10,587 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:56:10,589 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T10:56:10,590 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 3469f9ca0af3,35815,1733741765917 2024-12-09T10:56:10,591 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:56:10,593 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6c2e176a 2024-12-09T10:56:10,596 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T10:56:10,601 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41755, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T10:56:10,606 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:56:10,609 
DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:56:10,610 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T10:56:10,610 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:56:10,610 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b9c4bca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:10,611 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:56:10,614 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:56:10,617 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:10,619 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41348, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:56:10,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-09T10:56:10,630 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T10:56:10,631 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:10,686 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-09T10:56:10,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78bead51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:10,688 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:56:10,694 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T10:56:10,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=4 2024-12-09T10:56:10,704 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:56:10,705 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:10,735 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40862, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:56:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3469f9ca0af3,35815,1733741765917 2024-12-09T10:56:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-12-09T10:56:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/test.cache.data in system properties and HBase conf 2024-12-09T10:56:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T10:56:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir in system properties and HBase conf 2024-12-09T10:56:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T10:56:10,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T10:56:10,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T10:56:10,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T10:56:10,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T10:56:10,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T10:56:10,740 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T10:56:10,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T10:56:10,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T10:56:10,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T10:56:10,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T10:56:10,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T10:56:10,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/nfs.dump.dir in system properties and HBase conf 2024-12-09T10:56:10,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/java.io.tmpdir in system properties and HBase conf 2024-12-09T10:56:10,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T10:56:10,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T10:56:10,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T10:56:10,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741837_1013 (size=349) 2024-12-09T10:56:10,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T10:56:10,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741837_1013 (size=349) 2024-12-09T10:56:10,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741837_1013 (size=349) 2024-12-09T10:56:10,836 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a0d08af07fc0beaa578cbd208923b1fb, NAME => 'hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:56:10,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741838_1014 (size=592039) 2024-12-09T10:56:10,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741838_1014 (size=592039) 2024-12-09T10:56:10,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741838_1014 (size=592039) 2024-12-09T10:56:10,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741839_1015 (size=36) 2024-12-09T10:56:10,929 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:10,929 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing a0d08af07fc0beaa578cbd208923b1fb, disabling compactions & flushes 2024-12-09T10:56:10,929 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 2024-12-09T10:56:10,929 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 2024-12-09T10:56:10,929 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 
after waiting 0 ms 2024-12-09T10:56:10,930 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 2024-12-09T10:56:10,930 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 2024-12-09T10:56:10,930 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for a0d08af07fc0beaa578cbd208923b1fb: Waiting for close lock at 1733741770929Disabling compacts and flushes for region at 1733741770929Disabling writes for close at 1733741770929Writing region close event to WAL at 1733741770930 (+1 ms)Closed at 1733741770930 2024-12-09T10:56:10,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741839_1015 (size=36) 2024-12-09T10:56:10,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741839_1015 (size=36) 2024-12-09T10:56:10,934 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T10:56:10,942 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733741770935"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741770935"}]},"ts":"1733741770935"} 2024-12-09T10:56:10,952 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T10:56:10,963 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T10:56:10,969 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741770964"}]},"ts":"1733741770964"} 2024-12-09T10:56:10,982 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-09T10:56:10,984 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T10:56:10,990 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T10:56:10,991 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T10:56:10,991 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T10:56:10,991 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T10:56:10,991 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T10:56:10,991 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T10:56:10,991 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T10:56:10,991 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T10:56:10,991 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T10:56:10,991 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of 
racks=1 2024-12-09T10:56:10,993 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=a0d08af07fc0beaa578cbd208923b1fb, ASSIGN}] 2024-12-09T10:56:10,997 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=a0d08af07fc0beaa578cbd208923b1fb, ASSIGN 2024-12-09T10:56:11,000 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=a0d08af07fc0beaa578cbd208923b1fb, ASSIGN; state=OFFLINE, location=3469f9ca0af3,39691,1733741766880; forceNewPlan=false, retain=false 2024-12-09T10:56:11,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T10:56:11,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741840_1016 (size=1663647) 2024-12-09T10:56:11,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741840_1016 (size=1663647) 2024-12-09T10:56:11,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741840_1016 (size=1663647) 2024-12-09T10:56:11,154 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T10:56:11,155 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a0d08af07fc0beaa578cbd208923b1fb, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T10:56:11,164 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=a0d08af07fc0beaa578cbd208923b1fb, ASSIGN because future has completed 2024-12-09T10:56:11,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a0d08af07fc0beaa578cbd208923b1fb, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T10:56:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T10:56:11,392 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 
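The CreateTableProcedure entries above show hbase:acl being laid out with a single 'l' family (VERSIONS=1, IN_MEMORY=true, BLOCKSIZE=8192) and then handed to a TransitRegionStateProcedure for assignment. A hedged sketch of issuing an equivalent create from the client API; the table name acl_demo is hypothetical, since the real hbase:acl table is created by the master itself when the AccessController is enabled:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: create a table whose single column family mirrors the settings
// logged for hbase:acl's 'l' family. 'acl_demo' is a made-up name.
public class CreateAclLikeTable {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("acl_demo");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("l"))
            .setMaxVersions(1)
            .setInMemory(true)
            .setBlocksize(8192)
            .build())
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (!admin.tableExists(name)) {
        // Internally this runs the same kind of CreateTableProcedure seen in the log.
        admin.createTable(desc);
      }
    }
  }
}
```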
2024-12-09T10:56:11,392 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a0d08af07fc0beaa578cbd208923b1fb, NAME => 'hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb.', STARTKEY => '', ENDKEY => ''} 2024-12-09T10:56:11,393 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. service=AccessControlService 2024-12-09T10:56:11,393 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T10:56:11,393 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl a0d08af07fc0beaa578cbd208923b1fb 2024-12-09T10:56:11,394 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:11,394 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a0d08af07fc0beaa578cbd208923b1fb 2024-12-09T10:56:11,394 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a0d08af07fc0beaa578cbd208923b1fb 2024-12-09T10:56:11,398 INFO [StoreOpener-a0d08af07fc0beaa578cbd208923b1fb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region a0d08af07fc0beaa578cbd208923b1fb 2024-12-09T10:56:11,404 INFO [StoreOpener-a0d08af07fc0beaa578cbd208923b1fb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a0d08af07fc0beaa578cbd208923b1fb columnFamilyName l 2024-12-09T10:56:11,404 DEBUG [StoreOpener-a0d08af07fc0beaa578cbd208923b1fb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:11,406 INFO [StoreOpener-a0d08af07fc0beaa578cbd208923b1fb-1 {}] regionserver.HStore(327): Store=a0d08af07fc0beaa578cbd208923b1fb/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:56:11,407 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a0d08af07fc0beaa578cbd208923b1fb 2024-12-09T10:56:11,408 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb 2024-12-09T10:56:11,409 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb 2024-12-09T10:56:11,412 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a0d08af07fc0beaa578cbd208923b1fb 2024-12-09T10:56:11,412 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a0d08af07fc0beaa578cbd208923b1fb 2024-12-09T10:56:11,417 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a0d08af07fc0beaa578cbd208923b1fb 2024-12-09T10:56:11,424 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T10:56:11,425 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened a0d08af07fc0beaa578cbd208923b1fb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59856762, jitterRate=-0.10806474089622498}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T10:56:11,425 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a0d08af07fc0beaa578cbd208923b1fb 2024-12-09T10:56:11,428 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a0d08af07fc0beaa578cbd208923b1fb: Running coprocessor pre-open hook at 1733741771394Writing region info on filesystem at 1733741771394Initializing all the Stores at 1733741771396 (+2 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733741771396Cleaning up temporary data from old regions at 1733741771413 (+17 ms)Running coprocessor post-open hooks at 1733741771425 (+12 ms)Region opened successfully at 1733741771428 (+3 ms) 
2024-12-09T10:56:11,431 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., pid=6, masterSystemTime=1733741771342 2024-12-09T10:56:11,439 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 2024-12-09T10:56:11,439 INFO [RS_OPEN_PRIORITY_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 2024-12-09T10:56:11,440 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a0d08af07fc0beaa578cbd208923b1fb, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T10:56:11,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a0d08af07fc0beaa578cbd208923b1fb, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T10:56:11,446 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=3469f9ca0af3,39691,1733741766880, table=hbase:acl, region=a0d08af07fc0beaa578cbd208923b1fb. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-09T10:56:11,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T10:56:11,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a0d08af07fc0beaa578cbd208923b1fb, server=3469f9ca0af3,39691,1733741766880 in 286 msec 2024-12-09T10:56:11,465 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T10:56:11,465 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=a0d08af07fc0beaa578cbd208923b1fb, ASSIGN in 465 msec 2024-12-09T10:56:11,467 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T10:56:11,467 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741771467"}]},"ts":"1733741771467"} 2024-12-09T10:56:11,470 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-09T10:56:11,472 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T10:56:11,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 856 msec 2024-12-09T10:56:11,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T10:56:11,856 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-12-09T10:56:11,878 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T10:56:11,879 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T10:56:11,880 INFO [master/3469f9ca0af3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3469f9ca0af3,35815,1733741765917-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T10:56:13,444 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T10:56:13,581 WARN [Thread-383 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T10:56:13,987 INFO [Thread-383 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T10:56:14,000 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 
2024-12-09T10:56:14,001 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T10:56:14,052 INFO [Thread-383 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T10:56:14,052 INFO [Thread-383 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T10:56:14,052 INFO [Thread-383 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T10:56:14,098 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@666ff87d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,AVAILABLE} 2024-12-09T10:56:14,099 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@663ccefb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-09T10:56:14,182 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T10:56:14,182 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T10:56:14,182 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T10:56:14,192 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T10:56:14,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6688010e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,AVAILABLE} 2024-12-09T10:56:14,207 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b8eb224{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-09T10:56:14,341 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-09T10:56:14,341 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-09T10:56:14,341 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-09T10:56:14,343 INFO [Thread-383 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-09T10:56:14,424 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T10:56:14,964 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to 
GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T10:56:15,012 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T10:56:15,026 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-09T10:56:15,399 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T10:56:15,434 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7148d4a4{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/java.io.tmpdir/jetty-localhost-34071-hadoop-yarn-common-3_4_1_jar-_-any-3319322714901961435/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-09T10:56:15,435 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5320dc1e{HTTP/1.1, (http/1.1)}{localhost:34071} 2024-12-09T10:56:15,435 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@796a1222{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/java.io.tmpdir/jetty-localhost-42439-hadoop-yarn-common-3_4_1_jar-_-any-8396810423779118733/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-09T10:56:15,435 INFO [Time-limited test {}] server.Server(415): Started @19061ms 2024-12-09T10:56:15,436 INFO [Thread-383 {}] server.AbstractConnector(333): Started ServerConnector@56f1a1bb{HTTP/1.1, (http/1.1)}{localhost:42439} 2024-12-09T10:56:15,436 INFO [Thread-383 {}] server.Server(415): Started @19062ms 2024-12-09T10:56:15,620 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T10:56:15,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741841_1017 (size=5) 2024-12-09T10:56:15,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741841_1017 (size=5) 2024-12-09T10:56:15,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741841_1017 (size=5) 2024-12-09T10:56:16,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:56:16,471 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-09T10:56:16,473 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T10:56:16,473 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T10:56:16,476 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-09T10:56:16,476 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-09T10:56:16,477 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:56:16,477 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-09T10:56:16,478 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-09T10:56:16,478 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-09T10:56:16,478 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:56:16,478 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-09T10:56:16,479 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T10:56:16,479 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T10:56:16,479 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T10:56:16,479 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T10:56:17,558 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-09T10:56:17,565 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T10:56:17,646 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. 
This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-09T10:56:17,650 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T10:56:17,698 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T10:56:17,698 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T10:56:17,698 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T10:56:17,706 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T10:56:17,710 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f710d65{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,AVAILABLE} 2024-12-09T10:56:17,711 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4819e16b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-09T10:56:17,793 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-09T10:56:17,793 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-09T10:56:17,793 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-09T10:56:17,794 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-09T10:56:17,806 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T10:56:17,828 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T10:56:17,981 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T10:56:17,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@ef45329{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/java.io.tmpdir/jetty-localhost-44253-hadoop-yarn-common-3_4_1_jar-_-any-685510334902939381/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-09T10:56:17,997 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e0399ee{HTTP/1.1, (http/1.1)}{localhost:44253} 2024-12-09T10:56:17,997 INFO [Time-limited test {}] server.Server(415): Started @21623ms 2024-12-09T10:56:18,253 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-09T10:56:18,255 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T10:56:18,272 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-09T10:56:18,273 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T10:56:18,286 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T10:56:18,286 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T10:56:18,286 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T10:56:18,287 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T10:56:18,288 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43d0fa3d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,AVAILABLE} 2024-12-09T10:56:18,289 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fe504e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-09T10:56:18,360 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-09T10:56:18,360 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-09T10:56:18,360 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-09T10:56:18,360 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-09T10:56:18,385 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T10:56:18,393 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T10:56:18,553 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T10:56:18,564 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ae57486{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/java.io.tmpdir/jetty-localhost-41841-hadoop-yarn-common-3_4_1_jar-_-any-8613027876382237959/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-09T10:56:18,575 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f739d05{HTTP/1.1, (http/1.1)}{localhost:41841} 2024-12-09T10:56:18,575 INFO [Time-limited test {}] server.Server(415): Started @22201ms 2024-12-09T10:56:18,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-09T10:56:18,657 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T10:56:18,706 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=713, OpenFileDescriptor=777, MaxFileDescriptor=1048576, SystemLoadAverage=444, ProcessCount=11, AvailableMemoryMB=7665 2024-12-09T10:56:18,709 WARN [Time-limited test {}] 
hbase.ResourceChecker(130): Thread=713 is superior to 500 2024-12-09T10:56:18,715 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T10:56:18,730 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(321): The fetched master address is 3469f9ca0af3,35815,1733741765917 2024-12-09T10:56:18,730 DEBUG [Time-limited test {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6e9eb7 2024-12-09T10:56:18,731 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T10:56:18,755 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45556, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T10:56:18,758 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T10:56:18,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:18,766 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T10:56:18,774 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-09T10:56:18,784 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T10:56:18,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T10:56:18,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741842_1018 (size=458) 2024-12-09T10:56:18,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741842_1018 (size=458) 2024-12-09T10:56:18,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741842_1018 (size=458) 2024-12-09T10:56:18,855 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => aea25906d47c6460c0b78ec0f095922f, NAME => 
'testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:56:18,863 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 8d369bd75555122cbc103d82c8629467, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:56:18,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T10:56:18,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741843_1019 (size=83) 2024-12-09T10:56:18,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741843_1019 (size=83) 2024-12-09T10:56:18,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741843_1019 (size=83) 2024-12-09T10:56:18,922 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:18,922 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing aea25906d47c6460c0b78ec0f095922f, disabling compactions & flushes 2024-12-09T10:56:18,922 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 2024-12-09T10:56:18,922 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 
2024-12-09T10:56:18,922 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. after waiting 0 ms 2024-12-09T10:56:18,922 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 2024-12-09T10:56:18,923 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 2024-12-09T10:56:18,923 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for aea25906d47c6460c0b78ec0f095922f: Waiting for close lock at 1733741778922Disabling compacts and flushes for region at 1733741778922Disabling writes for close at 1733741778922Writing region close event to WAL at 1733741778923 (+1 ms)Closed at 1733741778923 2024-12-09T10:56:18,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741844_1020 (size=83) 2024-12-09T10:56:18,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741844_1020 (size=83) 2024-12-09T10:56:18,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741844_1020 (size=83) 2024-12-09T10:56:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T10:56:19,328 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:19,328 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 8d369bd75555122cbc103d82c8629467, disabling compactions & flushes 2024-12-09T10:56:19,328 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 2024-12-09T10:56:19,328 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 2024-12-09T10:56:19,328 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. after waiting 0 ms 2024-12-09T10:56:19,328 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 
2024-12-09T10:56:19,328 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 2024-12-09T10:56:19,328 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 8d369bd75555122cbc103d82c8629467: Waiting for close lock at 1733741779328Disabling compacts and flushes for region at 1733741779328Disabling writes for close at 1733741779328Writing region close event to WAL at 1733741779328Closed at 1733741779328 2024-12-09T10:56:19,332 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T10:56:19,333 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733741779332"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741779332"}]},"ts":"1733741779332"} 2024-12-09T10:56:19,333 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733741779332"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741779332"}]},"ts":"1733741779332"} 2024-12-09T10:56:19,381 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-09T10:56:19,384 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T10:56:19,384 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741779384"}]},"ts":"1733741779384"} 2024-12-09T10:56:19,390 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-09T10:56:19,391 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T10:56:19,394 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T10:56:19,394 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T10:56:19,394 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T10:56:19,394 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T10:56:19,394 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T10:56:19,394 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T10:56:19,394 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T10:56:19,394 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T10:56:19,394 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T10:56:19,395 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T10:56:19,395 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=aea25906d47c6460c0b78ec0f095922f, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=8d369bd75555122cbc103d82c8629467, ASSIGN}] 2024-12-09T10:56:19,398 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=aea25906d47c6460c0b78ec0f095922f, ASSIGN 2024-12-09T10:56:19,399 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=8d369bd75555122cbc103d82c8629467, ASSIGN 2024-12-09T10:56:19,401 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=aea25906d47c6460c0b78ec0f095922f, ASSIGN; state=OFFLINE, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T10:56:19,401 
INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=8d369bd75555122cbc103d82c8629467, ASSIGN; state=OFFLINE, location=3469f9ca0af3,42349,1733741767108; forceNewPlan=false, retain=false 2024-12-09T10:56:19,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T10:56:19,552 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-09T10:56:19,552 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=8d369bd75555122cbc103d82c8629467, regionState=OPENING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T10:56:19,552 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=aea25906d47c6460c0b78ec0f095922f, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:19,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=8d369bd75555122cbc103d82c8629467, ASSIGN because future has completed 2024-12-09T10:56:19,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8d369bd75555122cbc103d82c8629467, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T10:56:19,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=aea25906d47c6460c0b78ec0f095922f, ASSIGN because future has completed 2024-12-09T10:56:19,560 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure aea25906d47c6460c0b78ec0f095922f, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T10:56:19,713 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T10:56:19,716 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T10:56:19,737 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38093, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T10:56:19,749 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55849, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T10:56:19,753 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 
2024-12-09T10:56:19,753 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 8d369bd75555122cbc103d82c8629467, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T10:56:19,754 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. service=AccessControlService 2024-12-09T10:56:19,754 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T10:56:19,754 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:19,754 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:19,754 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:19,755 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:19,762 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 2024-12-09T10:56:19,762 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => aea25906d47c6460c0b78ec0f095922f, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T10:56:19,763 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. service=AccessControlService 2024-12-09T10:56:19,763 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T10:56:19,763 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:19,763 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:19,764 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:19,764 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:19,770 INFO [StoreOpener-8d369bd75555122cbc103d82c8629467-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:19,770 INFO [StoreOpener-aea25906d47c6460c0b78ec0f095922f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:19,779 INFO [StoreOpener-8d369bd75555122cbc103d82c8629467-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d369bd75555122cbc103d82c8629467 columnFamilyName cf 2024-12-09T10:56:19,779 INFO [StoreOpener-aea25906d47c6460c0b78ec0f095922f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aea25906d47c6460c0b78ec0f095922f columnFamilyName cf 2024-12-09T10:56:19,786 DEBUG [StoreOpener-8d369bd75555122cbc103d82c8629467-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:19,786 DEBUG [StoreOpener-aea25906d47c6460c0b78ec0f095922f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:19,787 INFO [StoreOpener-8d369bd75555122cbc103d82c8629467-1 {}] regionserver.HStore(327): Store=8d369bd75555122cbc103d82c8629467/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:56:19,787 INFO [StoreOpener-aea25906d47c6460c0b78ec0f095922f-1 {}] regionserver.HStore(327): Store=aea25906d47c6460c0b78ec0f095922f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:56:19,788 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:19,788 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:19,789 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:19,789 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:19,790 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:19,791 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:19,792 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:19,792 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:19,792 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:19,792 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for aea25906d47c6460c0b78ec0f095922f 
2024-12-09T10:56:19,799 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for aea25906d47c6460c0b78ec0f095922f
2024-12-09T10:56:19,800 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 8d369bd75555122cbc103d82c8629467
2024-12-09T10:56:19,807 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T10:56:19,808 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened aea25906d47c6460c0b78ec0f095922f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64203522, jitterRate=-0.04329296946525574}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-09T10:56:19,808 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for aea25906d47c6460c0b78ec0f095922f
2024-12-09T10:56:19,810 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for aea25906d47c6460c0b78ec0f095922f:
  Running coprocessor pre-open hook at 1733741779764
  Writing region info on filesystem at 1733741779764
  Initializing all the Stores at 1733741779766 (+2 ms)
  Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741779766
  Cleaning up temporary data from old regions at 1733741779792 (+26 ms)
  Running coprocessor post-open hooks at 1733741779808 (+16 ms)
  Region opened successfully at 1733741779809 (+1 ms)
2024-12-09T10:56:19,812 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f., pid=11, masterSystemTime=1733741779715
2024-12-09T10:56:19,814 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T10:56:19,816 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 8d369bd75555122cbc103d82c8629467; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63904544, jitterRate=-0.04774808883666992}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-09T10:56:19,817 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8d369bd75555122cbc103d82c8629467
2024-12-09T10:56:19,817 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 8d369bd75555122cbc103d82c8629467:
  Running coprocessor pre-open hook at 1733741779755
  Writing region info on filesystem at 1733741779755
  Initializing all the Stores at 1733741779758 (+3 ms)
  Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741779758
  Cleaning up temporary data from old regions at 1733741779792 (+34 ms)
  Running coprocessor post-open hooks at 1733741779817 (+25 ms)
  Region opened successfully at 1733741779817
2024-12-09T10:56:19,819 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f.
2024-12-09T10:56:19,819 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467., pid=10, masterSystemTime=1733741779712
2024-12-09T10:56:19,819 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f.
2024-12-09T10:56:19,821 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=aea25906d47c6460c0b78ec0f095922f, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,33293,1733741767044
2024-12-09T10:56:19,824 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467.
2024-12-09T10:56:19,824 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467.
2024-12-09T10:56:19,826 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=8d369bd75555122cbc103d82c8629467, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T10:56:19,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure aea25906d47c6460c0b78ec0f095922f, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T10:56:19,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8d369bd75555122cbc103d82c8629467, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T10:56:19,847 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-12-09T10:56:19,850 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure aea25906d47c6460c0b78ec0f095922f, server=3469f9ca0af3,33293,1733741767044 in 278 msec 2024-12-09T10:56:19,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T10:56:19,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 8d369bd75555122cbc103d82c8629467, server=3469f9ca0af3,42349,1733741767108 in 287 msec 2024-12-09T10:56:19,854 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=aea25906d47c6460c0b78ec0f095922f, ASSIGN in 452 msec 2024-12-09T10:56:19,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-09T10:56:19,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=8d369bd75555122cbc103d82c8629467, ASSIGN in 457 msec 2024-12-09T10:56:19,859 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T10:56:19,860 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741779859"}]},"ts":"1733741779859"} 2024-12-09T10:56:19,863 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-09T10:56:19,865 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T10:56:19,870 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-09T10:56:19,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T10:56:19,892 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:56:19,892 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:56:19,892 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:19,894 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58867, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-09T10:56:19,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T10:56:19,900 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-09T10:56:19,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-09T10:56:19,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:19,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-09T10:56:19,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-09T10:56:19,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-09T10:56:19,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:19,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:19,923 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T10:56:19,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T10:56:19,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:56:19,930 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:56:19,930 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:56:19,930 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:56:19,934 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 1.1720 sec 2024-12-09T10:56:19,938 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:56:20,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T10:56:20,934 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-09T10:56:20,938 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T10:56:20,944 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:20,944 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 
2024-12-09T10:56:20,946 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T10:56:20,949 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T10:56:20,966 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T10:56:20,972 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:20,978 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57962, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:56:20,983 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:21,007 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33738, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:56:21,014 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T10:56:21,028 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-09T10:56:21,036 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-09T10:56:21,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733741781036 (current time:1733741781036). 
2024-12-09T10:56:21,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T10:56:21,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-09T10:56:21,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T10:56:21,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42a098c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:21,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:56:21,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:56:21,040 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:56:21,041 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:56:21,041 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:56:21,041 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1148526f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:21,041 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:56:21,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:56:21,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:21,046 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45570, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:56:21,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@643c3c59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:21,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:56:21,051 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1]
2024-12-09T10:56:21,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-09T10:56:21,053 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33256, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-09T10:56:21,056 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815.
2024-12-09T10:56:21,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
  at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
  at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
  at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
  at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T10:56:21,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T10:56:21,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T10:56:21,081 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-09T10:56:21,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47799b2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:21,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:56:21,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:56:21,100 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:56:21,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:56:21,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:56:21,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f66e182, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:21,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:56:21,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:56:21,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:21,104 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45578, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:56:21,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f3596c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:21,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:56:21,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:56:21,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:21,111 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33262, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T10:56:21,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2]
2024-12-09T10:56:21,119 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815.
2024-12-09T10:56:21,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
  at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
  at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
  at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
  at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
  at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
  at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
  at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.base/java.lang.reflect.Method.invoke(Method.java:568)
  at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
  at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
  at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
  at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
  at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
  at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T10:56:21,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T10:56:21,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T10:56:21,120 INFO [Registry-endpoints-refresh-end-points
{}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:56:21,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-09T10:56:21,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T10:56:21,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-09T10:56:21,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-09T10:56:21,139 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T10:56:21,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-09T10:56:21,152 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T10:56:21,172 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T10:56:21,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741845_1021 (size=215) 2024-12-09T10:56:21,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741845_1021 (size=215) 2024-12-09T10:56:21,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741845_1021 (size=215) 2024-12-09T10:56:21,234 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T10:56:21,238 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aea25906d47c6460c0b78ec0f095922f}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d369bd75555122cbc103d82c8629467}] 2024-12-09T10:56:21,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-09T10:56:21,243 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:21,243 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:21,405 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-09T10:56:21,405 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-09T10:56:21,406 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 2024-12-09T10:56:21,407 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 2024-12-09T10:56:21,412 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 8d369bd75555122cbc103d82c8629467: 2024-12-09T10:56:21,412 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-09T10:56:21,413 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:21,414 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for aea25906d47c6460c0b78ec0f095922f: 2024-12-09T10:56:21,414 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 
2024-12-09T10:56:21,414 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:21,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:56:21,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:56:21,419 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T10:56:21,419 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T10:56:21,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-09T10:56:21,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741846_1022 (size=86) 2024-12-09T10:56:21,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741846_1022 (size=86) 2024-12-09T10:56:21,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741846_1022 (size=86) 2024-12-09T10:56:21,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 
2024-12-09T10:56:21,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-09T10:56:21,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-09T10:56:21,493 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:21,493 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:21,501 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure aea25906d47c6460c0b78ec0f095922f in 259 msec 2024-12-09T10:56:21,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741847_1023 (size=86) 2024-12-09T10:56:21,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741847_1023 (size=86) 2024-12-09T10:56:21,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741847_1023 (size=86) 2024-12-09T10:56:21,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 
2024-12-09T10:56:21,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-09T10:56:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-09T10:56:21,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:21,511 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:21,519 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T10:56:21,521 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-12-09T10:56:21,522 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T10:56:21,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8d369bd75555122cbc103d82c8629467 in 276 msec 2024-12-09T10:56:21,525 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T10:56:21,525 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T10:56:21,526 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:21,527 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-09T10:56:21,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741848_1024 (size=78) 2024-12-09T10:56:21,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741848_1024 (size=78) 2024-12-09T10:56:21,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741848_1024 (size=78) 2024-12-09T10:56:21,555 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T10:56:21,556 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:21,560 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:21,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741849_1025 (size=713) 2024-12-09T10:56:21,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741849_1025 (size=713) 2024-12-09T10:56:21,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741849_1025 (size=713) 2024-12-09T10:56:21,630 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T10:56:21,652 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T10:56:21,654 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:21,660 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T10:56:21,661 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-09T10:56:21,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 532 msec 2024-12-09T10:56:21,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-09T10:56:21,764 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-09T10:56:21,780 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T10:56:21,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42349 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T10:56:21,796 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T10:56:21,802 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:21,802 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 
2024-12-09T10:56:21,803 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T10:56:21,806 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T10:56:21,817 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T10:56:21,832 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T10:56:21,842 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-09T10:56:21,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733741781842 (current time:1733741781842). 2024-12-09T10:56:21,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T10:56:21,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-09T10:56:21,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T10:56:21,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@774068d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:21,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:56:21,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:56:21,857 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:56:21,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:56:21,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:56:21,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e712139, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:21,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:56:21,860 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:56:21,861 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:21,866 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45598, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:56:21,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@87cd0d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:21,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:56:21,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:56:21,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:21,877 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33274, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:56:21,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 
2024-12-09T10:56:21,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:56:21,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:21,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:21,884 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:56:21,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@207bad30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:21,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:56:21,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:56:21,894 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:56:21,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:56:21,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:56:21,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d4ce3d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:21,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:56:21,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:56:21,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:21,904 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45624, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:56:21,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a04917, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:21,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:56:21,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:56:21,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:21,915 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33284, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:56:21,921 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T10:56:21,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 
2024-12-09T10:56:21,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:56:21,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:21,925 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:56:21,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:21,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-09T10:56:21,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-09T10:56:21,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-09T10:56:21,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-09T10:56:21,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-09T10:56:21,938 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T10:56:21,942 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T10:56:21,956 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T10:56:22,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-09T10:56:22,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741850_1026 (size=210) 2024-12-09T10:56:22,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741850_1026 (size=210) 2024-12-09T10:56:22,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741850_1026 (size=210) 2024-12-09T10:56:22,072 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T10:56:22,073 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aea25906d47c6460c0b78ec0f095922f}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d369bd75555122cbc103d82c8629467}] 2024-12-09T10:56:22,075 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:22,075 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:22,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-09T10:56:22,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-09T10:56:22,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-09T10:56:22,257 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 2024-12-09T10:56:22,263 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing aea25906d47c6460c0b78ec0f095922f 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-09T10:56:22,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 2024-12-09T10:56:22,270 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 8d369bd75555122cbc103d82c8629467 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-09T10:56:22,431 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412091fc32268c7e240f08c8d6214556e1f08_8d369bd75555122cbc103d82c8629467 is 71, key is 10427db65def575e694e163d89a3b026/cf:q/1733741781784/Put/seqid=0 2024-12-09T10:56:22,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120928e7e80ebc08450bb6d52ca748bf0418_aea25906d47c6460c0b78ec0f095922f is 71, key is 02f360d32f2389d1425cae9382d8a29e/cf:q/1733741781780/Put/seqid=0 2024-12-09T10:56:22,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-09T10:56:22,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741851_1027 (size=8031) 2024-12-09T10:56:22,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741851_1027 (size=8031) 2024-12-09T10:56:22,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44813 is added to blk_1073741851_1027 (size=8031) 2024-12-09T10:56:22,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:22,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741852_1028 (size=5241) 2024-12-09T10:56:22,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741852_1028 (size=5241) 2024-12-09T10:56:22,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741852_1028 (size=5241) 2024-12-09T10:56:22,732 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:22,818 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412091fc32268c7e240f08c8d6214556e1f08_8d369bd75555122cbc103d82c8629467 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202412091fc32268c7e240f08c8d6214556e1f08_8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:22,818 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120928e7e80ebc08450bb6d52ca748bf0418_aea25906d47c6460c0b78ec0f095922f to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024120928e7e80ebc08450bb6d52ca748bf0418_aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:22,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/.tmp/cf/cfec7c2745c24305bd43097e4290a8f2, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=aea25906d47c6460c0b78ec0f095922f] 2024-12-09T10:56:22,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/.tmp/cf/70a1289718844b87a9a78c4b25cfbe2f, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=8d369bd75555122cbc103d82c8629467] 
2024-12-09T10:56:22,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/.tmp/cf/70a1289718844b87a9a78c4b25cfbe2f is 224, key is 1ffa6ca95544da4c38ee95ecd323e227d/cf:q/1733741781784/Put/seqid=0 2024-12-09T10:56:22,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/.tmp/cf/cfec7c2745c24305bd43097e4290a8f2 is 224, key is 0c8c4ecdf6e5c8af7ece222e300607018/cf:q/1733741781780/Put/seqid=0 2024-12-09T10:56:23,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741853_1029 (size=15277) 2024-12-09T10:56:23,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741853_1029 (size=15277) 2024-12-09T10:56:23,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741853_1029 (size=15277) 2024-12-09T10:56:23,056 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/.tmp/cf/70a1289718844b87a9a78c4b25cfbe2f 2024-12-09T10:56:23,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-09T10:56:23,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741854_1030 (size=6416) 2024-12-09T10:56:23,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741854_1030 (size=6416) 2024-12-09T10:56:23,093 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/.tmp/cf/cfec7c2745c24305bd43097e4290a8f2 2024-12-09T10:56:23,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741854_1030 (size=6416) 2024-12-09T10:56:23,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/.tmp/cf/70a1289718844b87a9a78c4b25cfbe2f as 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/cf/70a1289718844b87a9a78c4b25cfbe2f 2024-12-09T10:56:23,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/.tmp/cf/cfec7c2745c24305bd43097e4290a8f2 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/cf/cfec7c2745c24305bd43097e4290a8f2 2024-12-09T10:56:23,124 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/cf/cfec7c2745c24305bd43097e4290a8f2, entries=5, sequenceid=6, filesize=6.3 K 2024-12-09T10:56:23,136 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/cf/70a1289718844b87a9a78c4b25cfbe2f, entries=45, sequenceid=6, filesize=14.9 K 2024-12-09T10:56:23,138 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for aea25906d47c6460c0b78ec0f095922f in 875ms, sequenceid=6, compaction requested=false 2024-12-09T10:56:23,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for aea25906d47c6460c0b78ec0f095922f: 2024-12-09T10:56:23,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-09T10:56:23,139 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:23,139 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:56:23,139 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/cf/cfec7c2745c24305bd43097e4290a8f2] hfiles 2024-12-09T10:56:23,139 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 8d369bd75555122cbc103d82c8629467 in 869ms, sequenceid=6, compaction requested=false 2024-12-09T10:56:23,139 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 8d369bd75555122cbc103d82c8629467: 2024-12-09T10:56:23,139 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-09T10:56:23,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:23,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:56:23,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/cf/70a1289718844b87a9a78c4b25cfbe2f] hfiles 2024-12-09T10:56:23,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/cf/70a1289718844b87a9a78c4b25cfbe2f for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:23,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/cf/cfec7c2745c24305bd43097e4290a8f2 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:23,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741855_1031 (size=125) 2024-12-09T10:56:23,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741855_1031 (size=125) 2024-12-09T10:56:23,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741855_1031 (size=125) 2024-12-09T10:56:23,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 
2024-12-09T10:56:23,325 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-09T10:56:23,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-09T10:56:23,342 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:23,344 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:23,362 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8d369bd75555122cbc103d82c8629467 in 1.2770 sec 2024-12-09T10:56:23,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741856_1032 (size=125) 2024-12-09T10:56:23,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741856_1032 (size=125) 2024-12-09T10:56:23,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741856_1032 (size=125) 2024-12-09T10:56:23,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 
2024-12-09T10:56:23,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-09T10:56:23,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-09T10:56:23,378 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:23,382 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:23,396 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-12-09T10:56:23,396 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure aea25906d47c6460c0b78ec0f095922f in 1.3200 sec 2024-12-09T10:56:23,397 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T10:56:23,398 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T10:56:23,414 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T10:56:23,415 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T10:56:23,415 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:23,419 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202412091fc32268c7e240f08c8d6214556e1f08_8d369bd75555122cbc103d82c8629467, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024120928e7e80ebc08450bb6d52ca748bf0418_aea25906d47c6460c0b78ec0f095922f] hfiles 2024-12-09T10:56:23,419 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202412091fc32268c7e240f08c8d6214556e1f08_8d369bd75555122cbc103d82c8629467 2024-12-09T10:56:23,419 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024120928e7e80ebc08450bb6d52ca748bf0418_aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:56:23,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741857_1033 (size=309) 2024-12-09T10:56:23,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741857_1033 (size=309) 2024-12-09T10:56:23,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741857_1033 (size=309) 2024-12-09T10:56:23,506 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T10:56:23,506 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:23,508 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:23,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741858_1034 (size=1023) 2024-12-09T10:56:23,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34611 is added to blk_1073741858_1034 (size=1023) 2024-12-09T10:56:23,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741858_1034 (size=1023) 2024-12-09T10:56:23,646 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T10:56:23,684 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T10:56:23,685 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:23,689 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T10:56:23,690 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-09T10:56:23,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 1.7620 sec 2024-12-09T10:56:24,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-09T10:56:24,085 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-09T10:56:24,163 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T10:56:24,170 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T10:56:24,175 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57968, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T10:56:24,177 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33293 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off 
compactions 2024-12-09T10:56:24,182 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T10:56:24,185 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33298, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T10:56:24,186 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-09T10:56:24,204 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33748, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T10:56:24,204 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42349 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-09T10:56:24,208 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T10:56:24,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:24,213 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T10:56:24,213 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:24,214 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-09T10:56:24,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-09T10:56:24,217 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T10:56:24,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741859_1035 (size=390) 2024-12-09T10:56:24,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741859_1035 (size=390) 2024-12-09T10:56:24,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741859_1035 (size=390) 2024-12-09T10:56:24,271 INFO 
[RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ed0231a21bfb4a829120c4d62b2caafd, NAME => 'testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:56:24,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-09T10:56:24,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741860_1036 (size=75) 2024-12-09T10:56:24,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741860_1036 (size=75) 2024-12-09T10:56:24,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741860_1036 (size=75) 2024-12-09T10:56:24,390 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:24,390 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing ed0231a21bfb4a829120c4d62b2caafd, disabling compactions & flushes 2024-12-09T10:56:24,390 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. 2024-12-09T10:56:24,390 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. 2024-12-09T10:56:24,390 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. after waiting 0 ms 2024-12-09T10:56:24,390 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. 2024-12-09T10:56:24,391 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. 
2024-12-09T10:56:24,399 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for ed0231a21bfb4a829120c4d62b2caafd: Waiting for close lock at 1733741784390Disabling compacts and flushes for region at 1733741784390Disabling writes for close at 1733741784390Writing region close event to WAL at 1733741784391 (+1 ms)Closed at 1733741784391 2024-12-09T10:56:24,404 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T10:56:24,406 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733741784404"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741784404"}]},"ts":"1733741784404"} 2024-12-09T10:56:24,412 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T10:56:24,417 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T10:56:24,418 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741784417"}]},"ts":"1733741784417"} 2024-12-09T10:56:24,424 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-09T10:56:24,426 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T10:56:24,429 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T10:56:24,429 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T10:56:24,429 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T10:56:24,429 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T10:56:24,429 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T10:56:24,429 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T10:56:24,429 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T10:56:24,429 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T10:56:24,429 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T10:56:24,429 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T10:56:24,430 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ed0231a21bfb4a829120c4d62b2caafd, ASSIGN}] 2024-12-09T10:56:24,433 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ed0231a21bfb4a829120c4d62b2caafd, ASSIGN 2024-12-09T10:56:24,437 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ed0231a21bfb4a829120c4d62b2caafd, ASSIGN; state=OFFLINE, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T10:56:24,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-09T10:56:24,588 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T10:56:24,589 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=ed0231a21bfb4a829120c4d62b2caafd, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:24,594 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ed0231a21bfb4a829120c4d62b2caafd, ASSIGN because future has completed 2024-12-09T10:56:24,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed0231a21bfb4a829120c4d62b2caafd, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T10:56:24,774 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. 2024-12-09T10:56:24,775 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => ed0231a21bfb4a829120c4d62b2caafd, NAME => 'testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd.', STARTKEY => '', ENDKEY => ''} 2024-12-09T10:56:24,775 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. service=AccessControlService 2024-12-09T10:56:24,776 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T10:56:24,776 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:24,776 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:24,776 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:24,776 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:24,794 INFO [StoreOpener-ed0231a21bfb4a829120c4d62b2caafd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:24,799 INFO [StoreOpener-ed0231a21bfb4a829120c4d62b2caafd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed0231a21bfb4a829120c4d62b2caafd columnFamilyName cf 2024-12-09T10:56:24,799 DEBUG [StoreOpener-ed0231a21bfb4a829120c4d62b2caafd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:24,800 INFO [StoreOpener-ed0231a21bfb4a829120c4d62b2caafd-1 {}] regionserver.HStore(327): Store=ed0231a21bfb4a829120c4d62b2caafd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:56:24,801 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:24,802 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:24,803 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:24,804 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:24,804 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:24,811 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:24,836 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T10:56:24,838 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened ed0231a21bfb4a829120c4d62b2caafd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68536095, jitterRate=0.021267399191856384}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T10:56:24,838 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:24,840 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for ed0231a21bfb4a829120c4d62b2caafd: Running coprocessor pre-open hook at 1733741784777Writing region info on filesystem at 1733741784777Initializing all the Stores at 1733741784779 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741784779Cleaning up temporary data from old regions at 1733741784804 (+25 ms)Running coprocessor post-open hooks at 1733741784838 (+34 ms)Region opened successfully at 1733741784840 (+2 ms) 2024-12-09T10:56:24,842 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd., pid=20, masterSystemTime=1733741784765 2024-12-09T10:56:24,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-09T10:56:24,847 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. 
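The entries up to this point record the server side of table creation: CreateTableProcedure (pid=18) adds the single region ed0231a21bfb4a829120c4d62b2caafd to hbase:meta, assigns it, and the region server opens it with one column family 'cf' (VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=64KB, no compression or encoding) plus the AccessController coprocessor. The test's own setup code is not part of this log; the following is only a minimal client-side sketch, assuming the standard HBase 2.x/3.x Admin and descriptor-builder APIs, of how a table with that schema could be created. The class name is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // One family 'cf'; attributes mirror the region open journal above:
          // VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=64KB, no compression/encoding.
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.ROW)
                  .setBlocksize(64 * 1024)
                  .setCompressionType(Compression.Algorithm.NONE)
                  .build())
              .build());
        }
      }
    }

admin.createTable blocks until the master's CreateTableProcedure finishes, which is why the client in the log keeps asking the master whether pid=18 is done.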
2024-12-09T10:56:24,847 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. 2024-12-09T10:56:24,850 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=ed0231a21bfb4a829120c4d62b2caafd, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:24,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed0231a21bfb4a829120c4d62b2caafd, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T10:56:24,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-09T10:56:24,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure ed0231a21bfb4a829120c4d62b2caafd, server=3469f9ca0af3,33293,1733741767044 in 257 msec 2024-12-09T10:56:24,882 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T10:56:24,882 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741784882"}]},"ts":"1733741784882"} 2024-12-09T10:56:24,883 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-09T10:56:24,883 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ed0231a21bfb4a829120c4d62b2caafd, ASSIGN in 443 msec 2024-12-09T10:56:24,886 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-09T10:56:24,889 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T10:56:24,889 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-09T10:56:24,896 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-09T10:56:24,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:56:24,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:56:24,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, 
quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:56:24,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:56:24,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:24,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:24,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:24,904 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:56:24,904 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:56:24,905 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:56:24,906 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:56:24,906 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:56:24,906 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:56:24,906 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:56:24,906 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:56:24,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 698 msec 2024-12-09T10:56:25,258 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T10:56:25,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-09T10:56:25,354 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-09T10:56:25,355 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T10:56:25,361 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T10:56:26,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:26,469 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-09T10:56:26,471 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:26,471 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-09T10:56:27,030 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-09T10:56:28,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741861_1037 (size=134217728) 2024-12-09T10:56:28,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741861_1037 (size=134217728) 2024-12-09T10:56:28,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741861_1037 (size=134217728) 2024-12-09T10:56:30,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741862_1038 (size=134217728) 2024-12-09T10:56:30,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741862_1038 
(size=134217728) 2024-12-09T10:56:30,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741862_1038 (size=134217728) 2024-12-09T10:56:30,280 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T10:56:30,849 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733741785370/Put/seqid=0 2024-12-09T10:56:30,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741863_1039 (size=51979256) 2024-12-09T10:56:30,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741863_1039 (size=51979256) 2024-12-09T10:56:30,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741863_1039 (size=51979256) 2024-12-09T10:56:30,864 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bdffc42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:30,864 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:56:30,864 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:56:30,866 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:56:30,867 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:56:30,867 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:56:30,868 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a1065c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:30,868 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:56:30,868 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:56:30,868 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:30,870 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55350, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:56:30,872 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d2b79d5, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:30,873 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:56:30,874 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:56:30,875 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:30,883 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60198, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:56:30,901 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:35869/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-09T10:56:30,902 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T10:56:30,903 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncConnectionImpl(321): The fetched master address is 3469f9ca0af3,35815,1733741765917 2024-12-09T10:56:30,904 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@23594375 2024-12-09T10:56:30,904 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T10:56:30,906 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55362, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T10:56:30,914 WARN [IPC Server handler 4 on default port 35869 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-09T10:56:30,920 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd., hostname=3469f9ca0af3,33293,1733741767044, seqNum=2] 2024-12-09T10:56:30,924 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:30,928 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35446, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:56:30,934 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T10:56:30,955 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load 
hfile=hdfs://localhost:35869/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-09T10:56:30,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:56:30,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:56:30,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:30,981 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41713, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-09T10:56:30,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39691 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T10:56:30,986 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39691 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:41713 deadline: 1733741850981, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-09T10:56:30,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T10:56:30,991 WARN [IPC Server handler 4 on default port 35869 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-09T10:56:31,027 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:35869/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/output/cf/test_file for inclusion in ed0231a21bfb4a829120c4d62b2caafd/cf 2024-12-09T10:56:31,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-09T10:56:31,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-09T10:56:31,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:35869/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 
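At this point BulkLoadHFilesTool has handed the ~320 MB test_file under .../output/cf to the region server, which validates its key range against the single, unbounded region before committing it (the oversplitting warning above is the stock HStore message for oversized HFiles; the AuthenticationService exception is expected on a cluster without a token secret manager). The actual test invocation is not visible in this log; as a rough sketch only, assuming the BulkLoadHFiles client API available in recent HBase releases, a programmatic bulk load of such a family-per-subdirectory layout could look like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

    public class BulkLoadTestFile {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
        // Expected layout: <dir>/<family>/<hfile>, e.g. .../output/cf/test_file.
        Path hfileDir = new Path(args[0]);
        // Loads every HFile found under the family subdirectories into the
        // table's regions, splitting files that straddle region boundaries.
        BulkLoadHFiles.create(conf).bulkLoad(table, hfileDir);
      }
    }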
2024-12-09T10:56:31,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HRegion(2603): Flush status journal for ed0231a21bfb4a829120c4d62b2caafd: 2024-12-09T10:56:31,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:35869/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/output/cf/test_file to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/staging/jenkins__testExportFileSystemStateWithSplitRegion__qdjcj03gp6582pbkhtnqa7497rk8995q2heshlrq264rn55r7is6sdct4e1593t1/cf/test_file 2024-12-09T10:56:31,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/staging/jenkins__testExportFileSystemStateWithSplitRegion__qdjcj03gp6582pbkhtnqa7497rk8995q2heshlrq264rn55r7is6sdct4e1593t1/cf/test_file as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_ 2024-12-09T10:56:31,047 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/staging/jenkins__testExportFileSystemStateWithSplitRegion__qdjcj03gp6582pbkhtnqa7497rk8995q2heshlrq264rn55r7is6sdct4e1593t1/cf/test_file into ed0231a21bfb4a829120c4d62b2caafd/cf as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_ - updating store file list. 
2024-12-09T10:56:31,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HStoreFile(483): HFile Bloom filter type for d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-09T10:56:31,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_ into ed0231a21bfb4a829120c4d62b2caafd/cf 2024-12-09T10:56:31,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/staging/jenkins__testExportFileSystemStateWithSplitRegion__qdjcj03gp6582pbkhtnqa7497rk8995q2heshlrq264rn55r7is6sdct4e1593t1/cf/test_file into ed0231a21bfb4a829120c4d62b2caafd/cf (new location: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_) 2024-12-09T10:56:31,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/staging/jenkins__testExportFileSystemStateWithSplitRegion__qdjcj03gp6582pbkhtnqa7497rk8995q2heshlrq264rn55r7is6sdct4e1593t1/cf/test_file 2024-12-09T10:56:31,074 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
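After the store file is committed ("Successfully loaded ... into ed0231a21bfb4a829120c4d62b2caafd/cf" above) the tool closes its async connection. A quick way to confirm the load from a fresh client, sketched here under the assumption that row keys follow the '1\x00\x00\x00' ... '9\x00\x00\x00' / cf:q layout reported earlier, would be a simple Get; the row chosen below is illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VerifyBulkLoad {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(
                 TableName.valueOf("testExportFileSystemStateWithSplitRegion"))) {
          // Row key '5' followed by three 0x00 bytes, matching the key format
          // reported for the bulk-loaded file (illustrative choice of row).
          byte[] row = new byte[] {'5', 0, 0, 0};
          Result r = table.get(new Get(row)
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")));
          System.out.println("row found: " + !r.isEmpty());
        }
      }
    }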
2024-12-09T10:56:31,075 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T10:56:31,075 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:31,077 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd., hostname=3469f9ca0af3,33293,1733741767044, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd., hostname=3469f9ca0af3,33293,1733741767044, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to 
address=3469f9ca0af3:33293 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-09T10:56:31,077 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd., hostname=3469f9ca0af3,33293,1733741767044, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-09T10:56:31,078 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd., hostname=3469f9ca0af3,33293,1733741767044, seqNum=2 from cache 2024-12-09T10:56:31,078 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:56:31,078 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:31,079 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T10:56:31,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] ipc.CallRunner(93): RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293: skipped callId: 7 service: ClientService methodName: CleanupBulkLoad size: 336 connection: 172.17.0.2:35446 deadline: 1733741851075 param: TODO: class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$CleanupBulkLoadRequest connection: 172.17.0.2:35446 2024-12-09T10:56:31,092 DEBUG [Time-limited test {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd., hostname=3469f9ca0af3,33293,1733741767044, seqNum=2] 2024-12-09T10:56:31,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. 
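The last entry above is the master receiving the client's split request for the still single-region table; pid=21 below is the resulting SplitTableRegionProcedure with daughters 17215edeea503fafe4eb13b967d0d988 and bdb9883dbaeab14d1681f28ff4927404. Issuing such a split from a client is a single Admin call; the explicit split point in this sketch is an assumption for illustration (omitting it lets HBase pick the region's midkey):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SplitTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
          // Explicit split point (illustrative); admin.split(table) without a
          // point lets the region's midkey decide.
          admin.split(table, new byte[] {'5', 0, 0, 0});
          // The split runs asynchronously as a master procedure; callers
          // typically poll admin.getRegions(table) until two daughters are online.
        }
      }
    }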
2024-12-09T10:56:31,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:31,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=ed0231a21bfb4a829120c4d62b2caafd, daughterA=17215edeea503fafe4eb13b967d0d988, daughterB=bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:31,124 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=ed0231a21bfb4a829120c4d62b2caafd, daughterA=17215edeea503fafe4eb13b967d0d988, daughterB=bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:31,124 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=ed0231a21bfb4a829120c4d62b2caafd, daughterA=17215edeea503fafe4eb13b967d0d988, daughterB=bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:31,124 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=ed0231a21bfb4a829120c4d62b2caafd, daughterA=17215edeea503fafe4eb13b967d0d988, daughterB=bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:31,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-09T10:56:31,138 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ed0231a21bfb4a829120c4d62b2caafd, UNASSIGN}] 2024-12-09T10:56:31,141 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ed0231a21bfb4a829120c4d62b2caafd, UNASSIGN 2024-12-09T10:56:31,146 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=ed0231a21bfb4a829120c4d62b2caafd, regionState=CLOSING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:31,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ed0231a21bfb4a829120c4d62b2caafd, UNASSIGN because future has completed 2024-12-09T10:56:31,150 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-09T10:56:31,151 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure ed0231a21bfb4a829120c4d62b2caafd, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T10:56:31,184 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3469f9ca0af3:39691 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 34 more 2024-12-09T10:56:31,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-09T10:56:31,317 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:31,318 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-09T10:56:31,319 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing ed0231a21bfb4a829120c4d62b2caafd, disabling compactions & flushes 2024-12-09T10:56:31,319 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. 2024-12-09T10:56:31,319 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. 2024-12-09T10:56:31,319 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. after waiting 0 ms 2024-12-09T10:56:31,319 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. 2024-12-09T10:56:31,340 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-12-09T10:56:31,346 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:56:31,347 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd. 
2024-12-09T10:56:31,347 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for ed0231a21bfb4a829120c4d62b2caafd: Waiting for close lock at 1733741791319Running coprocessor pre-close hooks at 1733741791319Disabling compacts and flushes for region at 1733741791319Disabling writes for close at 1733741791319Writing region close event to WAL at 1733741791322 (+3 ms)Running coprocessor post-close hooks at 1733741791342 (+20 ms)Closed at 1733741791347 (+5 ms) 2024-12-09T10:56:31,356 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:31,357 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=ed0231a21bfb4a829120c4d62b2caafd, regionState=CLOSED 2024-12-09T10:56:31,363 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure ed0231a21bfb4a829120c4d62b2caafd, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T10:56:31,372 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-09T10:56:31,375 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure ed0231a21bfb4a829120c4d62b2caafd, server=3469f9ca0af3,33293,1733741767044 in 214 msec 2024-12-09T10:56:31,382 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-12-09T10:56:31,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ed0231a21bfb4a829120c4d62b2caafd, UNASSIGN in 234 msec 2024-12-09T10:56:31,401 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:31,406 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=ed0231a21bfb4a829120c4d62b2caafd, threads=1 2024-12-09T10:56:31,411 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_ for region: ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:31,425 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-09T10:56:31,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-09T10:56:31,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741864_1040 (size=21) 2024-12-09T10:56:31,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741864_1040 (size=21) 2024-12-09T10:56:31,472 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741864_1040 (size=21) 2024-12-09T10:56:31,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-09T10:56:31,880 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-09T10:56:31,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741865_1041 (size=21) 2024-12-09T10:56:31,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741865_1041 (size=21) 2024-12-09T10:56:31,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741865_1041 (size=21) 2024-12-09T10:56:31,894 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_ for region: ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:56:31,896 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region ed0231a21bfb4a829120c4d62b2caafd Daughter A: [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd] storefiles, Daughter B: [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd] storefiles. 
2024-12-09T10:56:31,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741866_1042 (size=76) 2024-12-09T10:56:31,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741866_1042 (size=76) 2024-12-09T10:56:31,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741866_1042 (size=76) 2024-12-09T10:56:31,921 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:31,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741867_1043 (size=76) 2024-12-09T10:56:31,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741867_1043 (size=76) 2024-12-09T10:56:31,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741867_1043 (size=76) 2024-12-09T10:56:31,947 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:31,979 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-09T10:56:31,985 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-09T10:56:31,991 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733741791990"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733741791990"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733741791990"}]},"ts":"1733741791990"} 2024-12-09T10:56:31,991 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733741791990"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741791990"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733741791990"}]},"ts":"1733741791990"} 2024-12-09T10:56:31,991 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733741791990"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741791990"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733741791990"}]},"ts":"1733741791990"} 2024-12-09T10:56:32,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=17215edeea503fafe4eb13b967d0d988, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bdb9883dbaeab14d1681f28ff4927404, ASSIGN}] 2024-12-09T10:56:32,012 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=17215edeea503fafe4eb13b967d0d988, ASSIGN 2024-12-09T10:56:32,012 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bdb9883dbaeab14d1681f28ff4927404, ASSIGN 2024-12-09T10:56:32,013 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=17215edeea503fafe4eb13b967d0d988, ASSIGN; state=SPLITTING_NEW, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T10:56:32,013 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bdb9883dbaeab14d1681f28ff4927404, ASSIGN; state=SPLITTING_NEW, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T10:56:32,164 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-09T10:56:32,165 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=bdb9883dbaeab14d1681f28ff4927404, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:32,165 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=17215edeea503fafe4eb13b967d0d988, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:32,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=17215edeea503fafe4eb13b967d0d988, ASSIGN because future has completed 2024-12-09T10:56:32,169 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 17215edeea503fafe4eb13b967d0d988, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T10:56:32,170 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bdb9883dbaeab14d1681f28ff4927404, ASSIGN because future has completed 2024-12-09T10:56:32,171 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure bdb9883dbaeab14d1681f28ff4927404, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T10:56:32,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-09T10:56:32,329 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404. 2024-12-09T10:56:32,329 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => bdb9883dbaeab14d1681f28ff4927404, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404.', STARTKEY => '5', ENDKEY => ''} 2024-12-09T10:56:32,330 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404. service=AccessControlService 2024-12-09T10:56:32,330 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T10:56:32,330 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:32,330 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:32,330 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:32,330 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:32,332 INFO [StoreOpener-bdb9883dbaeab14d1681f28ff4927404-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:32,351 INFO [StoreOpener-bdb9883dbaeab14d1681f28ff4927404-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bdb9883dbaeab14d1681f28ff4927404 columnFamilyName cf 2024-12-09T10:56:32,351 DEBUG [StoreOpener-bdb9883dbaeab14d1681f28ff4927404-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:32,375 DEBUG [StoreFileOpener-bdb9883dbaeab14d1681f28ff4927404-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd: NONE, but ROW specified in column family configuration 2024-12-09T10:56:32,399 DEBUG [StoreOpener-bdb9883dbaeab14d1681f28ff4927404-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd->hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_-top 2024-12-09T10:56:32,400 INFO [StoreOpener-bdb9883dbaeab14d1681f28ff4927404-1 {}] regionserver.HStore(327): Store=bdb9883dbaeab14d1681f28ff4927404/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:56:32,400 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:32,402 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:32,406 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:32,407 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:32,407 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:32,414 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:32,417 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened bdb9883dbaeab14d1681f28ff4927404; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61549712, jitterRate=-0.08283782005310059}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T10:56:32,417 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:32,419 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for bdb9883dbaeab14d1681f28ff4927404: Running coprocessor pre-open hook at 1733741792331Writing region info on filesystem at 1733741792331Initializing all the Stores at 1733741792332 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741792332Cleaning up temporary data from old regions at 1733741792407 (+75 ms)Running coprocessor post-open hooks at 1733741792417 (+10 ms)Region opened successfully at 1733741792419 (+2 ms) 2024-12-09T10:56:32,421 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404., pid=27, masterSystemTime=1733741792324 2024-12-09T10:56:32,422 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] 
regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404.,because compaction is disabled. 2024-12-09T10:56:32,426 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404. 2024-12-09T10:56:32,426 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404. 2024-12-09T10:56:32,426 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988. 2024-12-09T10:56:32,426 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => 17215edeea503fafe4eb13b967d0d988, NAME => 'testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988.', STARTKEY => '', ENDKEY => '5'} 2024-12-09T10:56:32,427 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=bdb9883dbaeab14d1681f28ff4927404, regionState=OPEN, openSeqNum=7, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:32,427 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988. service=AccessControlService 2024-12-09T10:56:32,427 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T10:56:32,427 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:32,429 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:56:32,429 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:32,429 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:32,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure bdb9883dbaeab14d1681f28ff4927404, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T10:56:32,442 INFO [StoreOpener-17215edeea503fafe4eb13b967d0d988-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:32,443 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-12-09T10:56:32,443 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure bdb9883dbaeab14d1681f28ff4927404, server=3469f9ca0af3,33293,1733741767044 in 265 msec 2024-12-09T10:56:32,447 INFO [StoreOpener-17215edeea503fafe4eb13b967d0d988-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 17215edeea503fafe4eb13b967d0d988 columnFamilyName cf 2024-12-09T10:56:32,448 DEBUG [StoreOpener-17215edeea503fafe4eb13b967d0d988-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:32,467 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bdb9883dbaeab14d1681f28ff4927404, ASSIGN in 434 msec 2024-12-09T10:56:32,478 DEBUG [StoreFileOpener-17215edeea503fafe4eb13b967d0d988-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom 
filter type for d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd: NONE, but ROW specified in column family configuration 2024-12-09T10:56:32,488 DEBUG [StoreOpener-17215edeea503fafe4eb13b967d0d988-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd->hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_-bottom 2024-12-09T10:56:32,490 INFO [StoreOpener-17215edeea503fafe4eb13b967d0d988-1 {}] regionserver.HStore(327): Store=17215edeea503fafe4eb13b967d0d988/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:56:32,490 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:32,492 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:32,496 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:32,497 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:32,497 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:32,502 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:32,504 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened 17215edeea503fafe4eb13b967d0d988; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71527787, jitterRate=0.06584708392620087}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T10:56:32,504 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:32,505 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for 17215edeea503fafe4eb13b967d0d988: Running coprocessor pre-open hook at 1733741792429Writing region info on filesystem at 1733741792429Initializing all the Stores at 1733741792435 (+6 ms)Instantiating store for 
column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741792435Cleaning up temporary data from old regions at 1733741792497 (+62 ms)Running coprocessor post-open hooks at 1733741792504 (+7 ms)Region opened successfully at 1733741792504 2024-12-09T10:56:32,506 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988., pid=26, masterSystemTime=1733741792324 2024-12-09T10:56:32,507 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988.,because compaction is disabled. 2024-12-09T10:56:32,512 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988. 2024-12-09T10:56:32,512 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988. 2024-12-09T10:56:32,513 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=17215edeea503fafe4eb13b967d0d988, regionState=OPEN, openSeqNum=7, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:56:32,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 17215edeea503fafe4eb13b967d0d988, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T10:56:32,528 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=24 2024-12-09T10:56:32,528 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure 17215edeea503fafe4eb13b967d0d988, server=3469f9ca0af3,33293,1733741767044 in 354 msec 2024-12-09T10:56:32,543 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=24, resume processing ppid=21 2024-12-09T10:56:32,544 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=17215edeea503fafe4eb13b967d0d988, ASSIGN in 519 msec 2024-12-09T10:56:32,550 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=ed0231a21bfb4a829120c4d62b2caafd, daughterA=17215edeea503fafe4eb13b967d0d988, daughterB=bdb9883dbaeab14d1681f28ff4927404 in 1.4290 sec 2024-12-09T10:56:33,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-09T10:56:33,273 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-09T10:56:33,273 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T10:56:33,278 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-09T10:56:33,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733741793278 (current time:1733741793278). 2024-12-09T10:56:33,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T10:56:33,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-09T10:56:33,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T10:56:33,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@770115af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:33,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:56:33,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:56:33,281 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:56:33,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:56:33,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:56:33,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@710d5e74, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:33,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:56:33,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:56:33,282 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:33,283 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55388, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:56:33,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1edebc27, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:33,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:56:33,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:56:33,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:33,287 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60208, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:56:33,288 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 2024-12-09T10:56:33,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:56:33,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:33,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:33,289 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T10:56:33,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43b97306, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:33,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:56:33,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:56:33,291 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:56:33,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:56:33,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:56:33,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@299e7e22, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:33,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:56:33,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:56:33,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:33,294 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55406, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:56:33,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c7ea70d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:56:33,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:56:33,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:56:33,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:56:33,298 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60224, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T10:56:33,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T10:56:33,302 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 2024-12-09T10:56:33,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:56:33,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:33,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:56:33,303 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:56:33,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-09T10:56:33,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T10:56:33,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-09T10:56:33,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-09T10:56:33,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-09T10:56:33,307 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T10:56:33,308 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T10:56:33,312 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T10:56:33,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741868_1044 (size=197) 2024-12-09T10:56:33,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741868_1044 (size=197) 2024-12-09T10:56:33,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741868_1044 (size=197) 2024-12-09T10:56:33,327 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T10:56:33,327 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
17215edeea503fafe4eb13b967d0d988}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bdb9883dbaeab14d1681f28ff4927404}] 2024-12-09T10:56:33,328 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:33,329 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-09T10:56:33,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-09T10:56:33,482 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-09T10:56:33,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404. 2024-12-09T10:56:33,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988. 2024-12-09T10:56:33,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for bdb9883dbaeab14d1681f28ff4927404: 2024-12-09T10:56:33,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-09T10:56:33,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for 17215edeea503fafe4eb13b967d0d988: 2024-12-09T10:56:33,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-09T10:56:33,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404.' 
region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:33,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:56:33,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:33,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd->hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_-top] hfiles 2024-12-09T10:56:33,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:56:33,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:33,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd->hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_-bottom] hfiles 2024-12-09T10:56:33,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:33,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741869_1045 (size=182) 2024-12-09T10:56:33,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741869_1045 (size=182) 2024-12-09T10:56:33,541 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741869_1045 (size=182) 2024-12-09T10:56:33,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404. 2024-12-09T10:56:33,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-09T10:56:33,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-09T10:56:33,542 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:33,543 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:56:33,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741870_1046 (size=182) 2024-12-09T10:56:33,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741870_1046 (size=182) 2024-12-09T10:56:33,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741870_1046 (size=182) 2024-12-09T10:56:33,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988. 
2024-12-09T10:56:33,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-09T10:56:33,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-09T10:56:33,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:33,547 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bdb9883dbaeab14d1681f28ff4927404 in 218 msec 2024-12-09T10:56:33,547 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:56:33,553 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=28 2024-12-09T10:56:33,553 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 17215edeea503fafe4eb13b967d0d988 in 221 msec 2024-12-09T10:56:33,554 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T10:56:33,556 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
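The procedure states above (SNAPSHOT_PREPARE through SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, with per-region subprocedures pid=29/30) are what the master runs once a client asks for a FLUSH-type snapshot of the table. A hedged sketch of the client side that triggers this flow through the public Admin API; everything except the table and snapshot names from this run is placeholder.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
          // FLUSH-type snapshot, as in the SnapshotProcedure above; the call
          // returns once the master reports the procedure finished.
          admin.snapshot(new SnapshotDescription(
              "snapshot-testExportFileSystemStateWithSplitRegion", table, SnapshotType.FLUSH));
          admin.listSnapshots().forEach(s -> System.out.println(s.getName()));
        }
      }
    }
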
2024-12-09T10:56:33,556 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T10:56:33,556 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:56:33,558 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_] hfiles 2024-12-09T10:56:33,558 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_ 2024-12-09T10:56:33,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741871_1047 (size=129) 2024-12-09T10:56:33,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741871_1047 (size=129) 2024-12-09T10:56:33,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741871_1047 (size=129) 2024-12-09T10:56:33,602 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => ed0231a21bfb4a829120c4d62b2caafd, NAME => 'testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:33,604 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T10:56:33,607 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T10:56:33,607 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:33,609 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:33,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-09T10:56:33,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741872_1048 (size=891) 2024-12-09T10:56:33,658 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741872_1048 (size=891) 2024-12-09T10:56:33,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741872_1048 (size=891) 2024-12-09T10:56:33,681 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T10:56:33,719 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T10:56:33,719 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:33,723 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T10:56:33,723 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-09T10:56:33,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 419 msec 2024-12-09T10:56:33,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-09T10:56:33,933 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-09T10:56:33,933 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741793933 2024-12-09T10:56:33,934 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35869, tgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741793933, rawTgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741793933, srcFsUri=hdfs://localhost:35869, 
srcDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:56:33,990 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35869, inputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:56:33,991 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741793933, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741793933/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:34,012 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T10:56:34,057 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741793933/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T10:56:34,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741873_1049 (size=197) 2024-12-09T10:56:34,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741873_1049 (size=197) 2024-12-09T10:56:34,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741873_1049 (size=197) 2024-12-09T10:56:34,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741874_1050 (size=891) 2024-12-09T10:56:34,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741874_1050 (size=891) 2024-12-09T10:56:34,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741874_1050 (size=891) 2024-12-09T10:56:34,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:56:34,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:56:34,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:56:34,498 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See 
HBASE-27595 for details. 2024-12-09T10:56:35,644 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-13974553170587003593.jar 2024-12-09T10:56:35,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:56:35,646 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:56:35,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-472870366542116624.jar 2024-12-09T10:56:35,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:56:35,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:56:35,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:56:35,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:56:35,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:56:35,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:56:35,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T10:56:35,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T10:56:35,747 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T10:56:35,747 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T10:56:35,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T10:56:35,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T10:56:35,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T10:56:35,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T10:56:35,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T10:56:35,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T10:56:35,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T10:56:35,753 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T10:56:35,754 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T10:56:35,754 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T10:56:35,755 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T10:56:35,755 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T10:56:35,755 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T10:56:35,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T10:56:35,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741875_1051 (size=24020) 2024-12-09T10:56:35,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741875_1051 (size=24020) 2024-12-09T10:56:35,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741875_1051 (size=24020) 2024-12-09T10:56:36,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741876_1052 (size=77755) 2024-12-09T10:56:36,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741876_1052 (size=77755) 2024-12-09T10:56:36,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741876_1052 (size=77755) 2024-12-09T10:56:36,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741877_1053 (size=131360) 2024-12-09T10:56:36,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741877_1053 (size=131360) 2024-12-09T10:56:36,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741877_1053 (size=131360) 2024-12-09T10:56:36,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741878_1054 (size=111793) 2024-12-09T10:56:36,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741878_1054 (size=111793) 2024-12-09T10:56:36,109 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741878_1054 (size=111793) 2024-12-09T10:56:36,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741879_1055 (size=1832290) 2024-12-09T10:56:36,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741879_1055 (size=1832290) 2024-12-09T10:56:36,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741879_1055 (size=1832290) 2024-12-09T10:56:36,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741880_1056 (size=8360282) 2024-12-09T10:56:36,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741880_1056 (size=8360282) 2024-12-09T10:56:36,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741880_1056 (size=8360282) 2024-12-09T10:56:36,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741881_1057 (size=503880) 2024-12-09T10:56:36,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741881_1057 (size=503880) 2024-12-09T10:56:36,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741881_1057 (size=503880) 2024-12-09T10:56:36,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741882_1058 (size=322274) 2024-12-09T10:56:36,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741882_1058 (size=322274) 2024-12-09T10:56:36,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741882_1058 (size=322274) 2024-12-09T10:56:36,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741883_1059 (size=20406) 2024-12-09T10:56:36,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741883_1059 (size=20406) 2024-12-09T10:56:36,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741883_1059 (size=20406) 2024-12-09T10:56:36,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741884_1060 (size=45609) 2024-12-09T10:56:36,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741884_1060 (size=45609) 2024-12-09T10:56:36,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741884_1060 (size=45609) 2024-12-09T10:56:36,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741885_1061 (size=136454) 2024-12-09T10:56:36,357 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741885_1061 (size=136454) 2024-12-09T10:56:36,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741885_1061 (size=136454) 2024-12-09T10:56:36,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741886_1062 (size=1597136) 2024-12-09T10:56:36,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741886_1062 (size=1597136) 2024-12-09T10:56:36,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741886_1062 (size=1597136) 2024-12-09T10:56:36,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741887_1063 (size=30873) 2024-12-09T10:56:36,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741887_1063 (size=30873) 2024-12-09T10:56:36,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741887_1063 (size=30873) 2024-12-09T10:56:36,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741888_1064 (size=29229) 2024-12-09T10:56:36,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741888_1064 (size=29229) 2024-12-09T10:56:36,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741888_1064 (size=29229) 2024-12-09T10:56:36,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741889_1065 (size=903861) 2024-12-09T10:56:36,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741889_1065 (size=903861) 2024-12-09T10:56:36,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741889_1065 (size=903861) 2024-12-09T10:56:36,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741890_1066 (size=443171) 2024-12-09T10:56:36,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741890_1066 (size=443171) 2024-12-09T10:56:36,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741890_1066 (size=443171) 2024-12-09T10:56:36,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741891_1067 (size=5175431) 2024-12-09T10:56:36,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741891_1067 (size=5175431) 2024-12-09T10:56:36,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741891_1067 (size=5175431) 2024-12-09T10:56:36,623 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741892_1068 (size=232881) 2024-12-09T10:56:36,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741892_1068 (size=232881) 2024-12-09T10:56:36,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741892_1068 (size=232881) 2024-12-09T10:56:36,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741893_1069 (size=1323991) 2024-12-09T10:56:36,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741893_1069 (size=1323991) 2024-12-09T10:56:36,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741893_1069 (size=1323991) 2024-12-09T10:56:36,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741894_1070 (size=4695811) 2024-12-09T10:56:36,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741894_1070 (size=4695811) 2024-12-09T10:56:36,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741894_1070 (size=4695811) 2024-12-09T10:56:36,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741895_1071 (size=1877034) 2024-12-09T10:56:36,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741895_1071 (size=1877034) 2024-12-09T10:56:36,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741895_1071 (size=1877034) 2024-12-09T10:56:36,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741896_1072 (size=6425021) 2024-12-09T10:56:36,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741896_1072 (size=6425021) 2024-12-09T10:56:36,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741896_1072 (size=6425021) 2024-12-09T10:56:36,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741897_1073 (size=217555) 2024-12-09T10:56:36,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741897_1073 (size=217555) 2024-12-09T10:56:36,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741897_1073 (size=217555) 2024-12-09T10:56:36,849 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T10:56:36,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741898_1074 (size=4188619) 
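The long run of mapreduce.TableMapReduceUtil(972) lines above is the export job resolving which jar provides each class it depends on and shipping those jars to the distributed cache; each upload then shows up as a triplet of addStoredBlock entries, one per replica. A minimal sketch of that resolution step, assuming a plain MapReduce Job object (the job name and the "tmpjars" lookup are illustrative only).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");
        // For each class the job needs, find its containing jar and add it to the
        // job's distributed cache -- the "For class X, using jar Y" lines above.
        TableMapReduceUtil.addDependencyJars(job);
        System.out.println("tmpjars=" + job.getConfiguration().get("tmpjars"));
      }
    }
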
2024-12-09T10:56:36,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741898_1074 (size=4188619) 2024-12-09T10:56:36,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741898_1074 (size=4188619) 2024-12-09T10:56:36,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741899_1075 (size=127628) 2024-12-09T10:56:36,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741899_1075 (size=127628) 2024-12-09T10:56:36,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741899_1075 (size=127628) 2024-12-09T10:56:37,348 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-09T10:56:37,354 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-09T10:56:37,362 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=ed0231a21bfb4a829120c4d62b2caafd-d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_. 2024-12-09T10:56:37,362 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=ed0231a21bfb4a829120c4d62b2caafd-d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_. 2024-12-09T10:56:37,363 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-09T10:56:37,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741900_1076 (size=244) 2024-12-09T10:56:37,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741900_1076 (size=244) 2024-12-09T10:56:37,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741900_1076 (size=244) 2024-12-09T10:56:37,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741901_1077 (size=17) 2024-12-09T10:56:37,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741901_1077 (size=17) 2024-12-09T10:56:37,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741901_1077 (size=17) 2024-12-09T10:56:37,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741902_1078 (size=304131) 2024-12-09T10:56:37,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741902_1078 (size=304131) 2024-12-09T10:56:37,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741902_1078 (size=304131) 2024-12-09T10:56:38,013 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single 
application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T10:56:38,013 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T10:56:38,231 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0001_000001 (auth:SIMPLE) from 127.0.0.1:38674 2024-12-09T10:56:48,519 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0001_000001 (auth:SIMPLE) from 127.0.0.1:51282 2024-12-09T10:56:49,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741903_1079 (size=349829) 2024-12-09T10:56:49,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741903_1079 (size=349829) 2024-12-09T10:56:49,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741903_1079 (size=349829) 2024-12-09T10:56:50,928 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0001_000001 (auth:SIMPLE) from 127.0.0.1:56182 2024-12-09T10:56:52,785 INFO [master/3469f9ca0af3:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T10:56:52,785 INFO [master/3469f9ca0af3:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T10:57:04,498 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
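At this point the export itself is running as a MapReduce job on the mini YARN cluster (a single split of 305.6 M, executed under application_1733741775522_0001). A hedged sketch of driving the same export programmatically, assuming the ExportSnapshot tool can be run through ToolRunner and accepts the -snapshot/-copy-to options from its documented command-line usage; the destination path is the one used by this test run.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Submits the export MapReduce job and blocks until it finishes.
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
            "-copy-to", "hdfs://localhost:35869/user/jenkins/test-data/"
                + "68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741793933"
        });
        System.exit(rc);
      }
    }
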
2024-12-09T10:57:04,755 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8d369bd75555122cbc103d82c8629467, had cached 0 bytes from a total of 15277 2024-12-09T10:57:04,763 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region aea25906d47c6460c0b78ec0f095922f, had cached 0 bytes from a total of 6416 2024-12-09T10:57:10,538 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region aea25906d47c6460c0b78ec0f095922f changed from -1.0 to 0.0, refreshing cache 2024-12-09T10:57:10,539 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region a0d08af07fc0beaa578cbd208923b1fb changed from -1.0 to 0.0, refreshing cache 2024-12-09T10:57:10,539 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 8d369bd75555122cbc103d82c8629467 changed from -1.0 to 0.0, refreshing cache 2024-12-09T10:57:17,330 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bdb9883dbaeab14d1681f28ff4927404, had cached 0 bytes from a total of 320414712 2024-12-09T10:57:17,428 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 17215edeea503fafe4eb13b967d0d988, had cached 0 bytes from a total of 320414712 2024-12-09T10:57:34,498 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T10:57:40,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741904_1080 (size=134217728) 2024-12-09T10:57:40,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741904_1080 (size=134217728) 2024-12-09T10:57:40,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741904_1080 (size=134217728) 2024-12-09T10:57:49,755 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8d369bd75555122cbc103d82c8629467, had cached 0 bytes from a total of 15277 2024-12-09T10:57:49,764 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region aea25906d47c6460c0b78ec0f095922f, had cached 0 bytes from a total of 6416 2024-12-09T10:58:02,332 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bdb9883dbaeab14d1681f28ff4927404, had cached 0 bytes from a total of 320414712 2024-12-09T10:58:02,428 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 17215edeea503fafe4eb13b967d0d988, had cached 0 bytes from a total of 320414712 2024-12-09T10:58:04,498 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-09T10:58:22,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741905_1081 (size=134217728) 2024-12-09T10:58:22,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741905_1081 (size=134217728) 2024-12-09T10:58:22,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741905_1081 (size=134217728) 2024-12-09T10:58:34,499 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T10:58:34,756 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8d369bd75555122cbc103d82c8629467, had cached 0 bytes from a total of 15277 2024-12-09T10:58:34,764 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region aea25906d47c6460c0b78ec0f095922f, had cached 0 bytes from a total of 6416 2024-12-09T10:58:38,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741906_1082 (size=51979256) 2024-12-09T10:58:38,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741906_1082 (size=51979256) 2024-12-09T10:58:38,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741906_1082 (size=51979256) 2024-12-09T10:58:38,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741907_1083 (size=17520) 2024-12-09T10:58:38,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741907_1083 (size=17520) 2024-12-09T10:58:38,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741907_1083 (size=17520) 2024-12-09T10:58:38,586 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0001/container_1733741775522_0001_01_000002/launch_container.sh] 2024-12-09T10:58:38,586 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0001/container_1733741775522_0001_01_000002/container_tokens] 2024-12-09T10:58:38,586 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0001/container_1733741775522_0001_01_000002/sysfs] 
2024-12-09T10:58:38,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741908_1084 (size=483)
2024-12-09T10:58:38,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741908_1084 (size=483)
2024-12-09T10:58:38,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741908_1084 (size=483)
2024-12-09T10:58:39,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741909_1085 (size=17520)
2024-12-09T10:58:39,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741909_1085 (size=17520)
2024-12-09T10:58:39,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741909_1085 (size=17520)
2024-12-09T10:58:39,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741910_1086 (size=349829)
2024-12-09T10:58:39,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741910_1086 (size=349829)
2024-12-09T10:58:39,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741910_1086 (size=349829)
2024-12-09T10:58:41,001 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export
2024-12-09T10:58:41,002 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity.
2024-12-09T10:58:41,071 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion
2024-12-09T10:58:41,072 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot
2024-12-09T10:58:41,078 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state
2024-12-09T10:58:41,078 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion
2024-12-09T10:58:41,080 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo
2024-12-09T10:58:41,080 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest
2024-12-09T10:58:41,080 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741793933/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741793933/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion
2024-12-09T10:58:41,081 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741793933/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo
2024-12-09T10:58:41,081 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741793933/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest
2024-12-09T10:58:41,131 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37280, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-09T10:58:41,134 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion
2024-12-09T10:58:41,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion
2024-12-09T10:58:41,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31
2024-12-09T10:58:41,165 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put
{"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741921164"}]},"ts":"1733741921164"} 2024-12-09T10:58:41,173 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38899, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:58:41,188 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-09T10:58:41,188 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-09T10:58:41,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-09T10:58:41,202 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=17215edeea503fafe4eb13b967d0d988, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bdb9883dbaeab14d1681f28ff4927404, UNASSIGN}] 2024-12-09T10:58:41,206 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bdb9883dbaeab14d1681f28ff4927404, UNASSIGN 2024-12-09T10:58:41,209 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=17215edeea503fafe4eb13b967d0d988, UNASSIGN 2024-12-09T10:58:41,211 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=bdb9883dbaeab14d1681f28ff4927404, regionState=CLOSING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:58:41,212 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=17215edeea503fafe4eb13b967d0d988, regionState=CLOSING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:58:41,218 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bdb9883dbaeab14d1681f28ff4927404, UNASSIGN because future has completed 2024-12-09T10:58:41,223 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=3469f9ca0af3,33293,1733741767044, table=testExportFileSystemStateWithSplitRegion, region=17215edeea503fafe4eb13b967d0d988. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-09T10:58:41,223 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=3469f9ca0af3,33293,1733741767044, table=testExportFileSystemStateWithSplitRegion, region=bdb9883dbaeab14d1681f28ff4927404. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-09T10:58:41,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=17215edeea503fafe4eb13b967d0d988, UNASSIGN because future has completed 2024-12-09T10:58:41,230 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T10:58:41,230 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure bdb9883dbaeab14d1681f28ff4927404, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T10:58:41,239 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T10:58:41,240 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 17215edeea503fafe4eb13b967d0d988, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T10:58:41,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-09T10:58:41,395 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58115, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T10:58:41,395 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:58:41,396 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T10:58:41,396 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing 17215edeea503fafe4eb13b967d0d988, disabling compactions & flushes 2024-12-09T10:58:41,396 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988. 2024-12-09T10:58:41,396 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988. 2024-12-09T10:58:41,396 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988. 
after waiting 0 ms 2024-12-09T10:58:41,396 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988. 2024-12-09T10:58:41,406 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-09T10:58:41,407 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:58:41,407 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988. 2024-12-09T10:58:41,407 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for 17215edeea503fafe4eb13b967d0d988: Waiting for close lock at 1733741921396Running coprocessor pre-close hooks at 1733741921396Disabling compacts and flushes for region at 1733741921396Disabling writes for close at 1733741921396Writing region close event to WAL at 1733741921397 (+1 ms)Running coprocessor post-close hooks at 1733741921407 (+10 ms)Closed at 1733741921407 2024-12-09T10:58:41,410 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed 17215edeea503fafe4eb13b967d0d988 2024-12-09T10:58:41,411 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:58:41,411 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T10:58:41,411 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing bdb9883dbaeab14d1681f28ff4927404, disabling compactions & flushes 2024-12-09T10:58:41,411 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404. 2024-12-09T10:58:41,411 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404. 2024-12-09T10:58:41,411 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404. after waiting 0 ms 2024-12-09T10:58:41,411 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404. 
2024-12-09T10:58:41,412 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=17215edeea503fafe4eb13b967d0d988, regionState=CLOSED 2024-12-09T10:58:41,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 17215edeea503fafe4eb13b967d0d988, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T10:58:41,425 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-09T10:58:41,425 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:58:41,426 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404. 2024-12-09T10:58:41,426 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for bdb9883dbaeab14d1681f28ff4927404: Waiting for close lock at 1733741921411Running coprocessor pre-close hooks at 1733741921411Disabling compacts and flushes for region at 1733741921411Disabling writes for close at 1733741921411Writing region close event to WAL at 1733741921418 (+7 ms)Running coprocessor post-close hooks at 1733741921425 (+7 ms)Closed at 1733741921426 (+1 ms) 2024-12-09T10:58:41,430 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=33 2024-12-09T10:58:41,430 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure 17215edeea503fafe4eb13b967d0d988, server=3469f9ca0af3,33293,1733741767044 in 177 msec 2024-12-09T10:58:41,435 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:58:41,438 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=17215edeea503fafe4eb13b967d0d988, UNASSIGN in 228 msec 2024-12-09T10:58:41,438 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=bdb9883dbaeab14d1681f28ff4927404, regionState=CLOSED 2024-12-09T10:58:41,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure bdb9883dbaeab14d1681f28ff4927404, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T10:58:41,469 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-12-09T10:58:41,469 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure bdb9883dbaeab14d1681f28ff4927404, 
server=3469f9ca0af3,33293,1733741767044 in 228 msec 2024-12-09T10:58:41,472 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-12-09T10:58:41,472 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bdb9883dbaeab14d1681f28ff4927404, UNASSIGN in 267 msec 2024-12-09T10:58:41,478 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-09T10:58:41,478 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 282 msec 2024-12-09T10:58:41,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-09T10:58:41,486 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741921486"}]},"ts":"1733741921486"} 2024-12-09T10:58:41,490 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-09T10:58:41,490 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-09T10:58:41,502 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 356 msec 2024-12-09T10:58:41,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-09T10:58:41,793 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-09T10:58:41,798 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,806 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,808 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,823 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42561, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 
2024-12-09T10:58:41,825 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,826 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:58:41,834 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988 2024-12-09T10:58:41,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,838 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T10:58:41,838 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T10:58:41,839 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T10:58:41,842 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T10:58:41,843 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/recovered.edits] 2024-12-09T10:58:41,843 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:58:41,844 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988/recovered.edits] 2024-12-09T10:58:41,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:58:41,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:58:41,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:58:41,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:58:41,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-09T10:58:41,853 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:58:41,854 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:58:41,854 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:58:41,854 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404/recovered.edits] 2024-12-09T10:58:41,855 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T10:58:41,886 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:58:41,886 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_ to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_ 2024-12-09T10:58:41,888 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988/cf/d3e7ed4a6467458d8c4d637dae658f50_SeqId_4_.ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:58:41,921 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988/recovered.edits/10.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988/recovered.edits/10.seqid 2024-12-09T10:58:41,922 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404/recovered.edits/10.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404/recovered.edits/10.seqid 2024-12-09T10:58:41,922 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/recovered.edits/6.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd/recovered.edits/6.seqid 2024-12-09T10:58:41,922 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/17215edeea503fafe4eb13b967d0d988 2024-12-09T10:58:41,923 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/bdb9883dbaeab14d1681f28ff4927404 2024-12-09T10:58:41,923 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportFileSystemStateWithSplitRegion/ed0231a21bfb4a829120c4d62b2caafd 2024-12-09T10:58:41,924 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-09T10:58:41,950 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-09T10:58:41,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-09T10:58:41,964 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-09T10:58:41,972 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-09T10:58:41,978 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,978 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 
2024-12-09T10:58:41,978 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733741921978"}]},"ts":"9223372036854775807"}
2024-12-09T10:58:41,978 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733741921978"}]},"ts":"9223372036854775807"}
2024-12-09T10:58:41,978 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733741921978"}]},"ts":"9223372036854775807"}
2024-12-09T10:58:41,985 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 3 regions from META
2024-12-09T10:58:41,985 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ed0231a21bfb4a829120c4d62b2caafd, NAME => 'testExportFileSystemStateWithSplitRegion,,1733741784207.ed0231a21bfb4a829120c4d62b2caafd.', STARTKEY => '', ENDKEY => ''}, {ENCODED => 17215edeea503fafe4eb13b967d0d988, NAME => 'testExportFileSystemStateWithSplitRegion,,1733741791114.17215edeea503fafe4eb13b967d0d988.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => bdb9883dbaeab14d1681f28ff4927404, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733741791114.bdb9883dbaeab14d1681f28ff4927404.', STARTKEY => '5', ENDKEY => ''}]
2024-12-09T10:58:41,985 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted.
2024-12-09T10:58:41,985 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733741921985"}]},"ts":"9223372036854775807"} 2024-12-09T10:58:41,989 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-09T10:58:41,990 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:41,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 191 msec 2024-12-09T10:58:42,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-09T10:58:42,167 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,168 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-09T10:58:42,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-09T10:58:42,175 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741922174"}]},"ts":"1733741922174"} 2024-12-09T10:58:42,177 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-09T10:58:42,177 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-09T10:58:42,179 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-09T10:58:42,182 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=aea25906d47c6460c0b78ec0f095922f, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=8d369bd75555122cbc103d82c8629467, UNASSIGN}] 2024-12-09T10:58:42,185 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=8d369bd75555122cbc103d82c8629467, UNASSIGN 2024-12-09T10:58:42,186 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=aea25906d47c6460c0b78ec0f095922f, UNASSIGN 2024-12-09T10:58:42,186 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=8d369bd75555122cbc103d82c8629467, regionState=CLOSING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T10:58:42,188 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=aea25906d47c6460c0b78ec0f095922f, regionState=CLOSING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:58:42,189 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=8d369bd75555122cbc103d82c8629467, UNASSIGN because future has completed 2024-12-09T10:58:42,190 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T10:58:42,190 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8d369bd75555122cbc103d82c8629467, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T10:58:42,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=aea25906d47c6460c0b78ec0f095922f, UNASSIGN because future has completed 2024-12-09T10:58:42,198 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T10:58:42,202 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure aea25906d47c6460c0b78ec0f095922f, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T10:58:42,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-09T10:58:42,350 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48781, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T10:58:42,351 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 8d369bd75555122cbc103d82c8629467 2024-12-09T10:58:42,351 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T10:58:42,351 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 8d369bd75555122cbc103d82c8629467, disabling compactions & 
flushes 2024-12-09T10:58:42,351 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 2024-12-09T10:58:42,351 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 2024-12-09T10:58:42,351 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. after waiting 0 ms 2024-12-09T10:58:42,351 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 2024-12-09T10:58:42,362 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:58:42,362 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T10:58:42,362 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing aea25906d47c6460c0b78ec0f095922f, disabling compactions & flushes 2024-12-09T10:58:42,362 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 2024-12-09T10:58:42,363 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 2024-12-09T10:58:42,363 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. after waiting 0 ms 2024-12-09T10:58:42,363 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 
2024-12-09T10:58:42,374 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T10:58:42,378 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:58:42,378 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467. 2024-12-09T10:58:42,378 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 8d369bd75555122cbc103d82c8629467: Waiting for close lock at 1733741922351Running coprocessor pre-close hooks at 1733741922351Disabling compacts and flushes for region at 1733741922351Disabling writes for close at 1733741922351Writing region close event to WAL at 1733741922352 (+1 ms)Running coprocessor post-close hooks at 1733741922378 (+26 ms)Closed at 1733741922378 2024-12-09T10:58:42,387 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 8d369bd75555122cbc103d82c8629467 2024-12-09T10:58:42,390 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T10:58:42,391 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:58:42,391 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f. 
2024-12-09T10:58:42,391 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for aea25906d47c6460c0b78ec0f095922f: Waiting for close lock at 1733741922362Running coprocessor pre-close hooks at 1733741922362Disabling compacts and flushes for region at 1733741922362Disabling writes for close at 1733741922363 (+1 ms)Writing region close event to WAL at 1733741922374 (+11 ms)Running coprocessor post-close hooks at 1733741922391 (+17 ms)Closed at 1733741922391 2024-12-09T10:58:42,392 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=8d369bd75555122cbc103d82c8629467, regionState=CLOSED 2024-12-09T10:58:42,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8d369bd75555122cbc103d82c8629467, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T10:58:42,402 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:58:42,404 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=aea25906d47c6460c0b78ec0f095922f, regionState=CLOSED 2024-12-09T10:58:42,414 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-12-09T10:58:42,415 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure 8d369bd75555122cbc103d82c8629467, server=3469f9ca0af3,42349,1733741767108 in 215 msec 2024-12-09T10:58:42,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure aea25906d47c6460c0b78ec0f095922f, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T10:58:42,419 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=8d369bd75555122cbc103d82c8629467, UNASSIGN in 233 msec 2024-12-09T10:58:42,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=40 2024-12-09T10:58:42,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure aea25906d47c6460c0b78ec0f095922f, server=3469f9ca0af3,33293,1733741767044 in 222 msec 2024-12-09T10:58:42,430 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=39 2024-12-09T10:58:42,430 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=aea25906d47c6460c0b78ec0f095922f, UNASSIGN in 242 msec 2024-12-09T10:58:42,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-09T10:58:42,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 256 msec 
2024-12-09T10:58:42,444 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741922443"}]},"ts":"1733741922443"} 2024-12-09T10:58:42,462 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-09T10:58:42,462 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-09T10:58:42,482 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 305 msec 2024-12-09T10:58:42,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-09T10:58:42,495 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-09T10:58:42,498 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,505 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,510 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,521 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,530 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:58:42,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,532 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T10:58:42,533 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T10:58:42,534 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/recovered.edits] 2024-12-09T10:58:42,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,534 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T10:58:42,536 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T10:58:42,538 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467 2024-12-09T10:58:42,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:58:42,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:58:42,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,542 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:58:42,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:58:42,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-09T10:58:42,545 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/recovered.edits] 2024-12-09T10:58:42,550 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/cf/cfec7c2745c24305bd43097e4290a8f2 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/cf/cfec7c2745c24305bd43097e4290a8f2 2024-12-09T10:58:42,561 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/cf/70a1289718844b87a9a78c4b25cfbe2f to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/cf/70a1289718844b87a9a78c4b25cfbe2f 2024-12-09T10:58:42,562 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f/recovered.edits/9.seqid 2024-12-09T10:58:42,564 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:58:42,568 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467/recovered.edits/9.seqid 2024-12-09T10:58:42,568 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSplitRegion/8d369bd75555122cbc103d82c8629467 2024-12-09T10:58:42,569 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-12-09T10:58:42,570 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-12-09T10:58:42,574 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf] 2024-12-09T10:58:42,581 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202412091fc32268c7e240f08c8d6214556e1f08_8d369bd75555122cbc103d82c8629467 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202412091fc32268c7e240f08c8d6214556e1f08_8d369bd75555122cbc103d82c8629467 2024-12-09T10:58:42,590 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024120928e7e80ebc08450bb6d52ca748bf0418_aea25906d47c6460c0b78ec0f095922f to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024120928e7e80ebc08450bb6d52ca748bf0418_aea25906d47c6460c0b78ec0f095922f 2024-12-09T10:58:42,591 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-12-09T10:58:42,614 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,630 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-09T10:58:42,641 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 
'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-09T10:58:42,646 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,646 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-12-09T10:58:42,647 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733741922646"}]},"ts":"9223372036854775807"} 2024-12-09T10:58:42,647 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733741922646"}]},"ts":"9223372036854775807"} 2024-12-09T10:58:42,654 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T10:58:42,654 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => aea25906d47c6460c0b78ec0f095922f, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733741778756.aea25906d47c6460c0b78ec0f095922f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 8d369bd75555122cbc103d82c8629467, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T10:58:42,654 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 
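With the region directories archived and the rows removed from hbase:meta, the DeleteTableProcedure can complete, and the test then drops the snapshots that referenced the table (see the records below). A minimal sketch of the corresponding client calls, assuming the table has already been disabled; this is illustrative and not the test's own code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableAndSnapshotsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
      // Blocks until the DeleteTableProcedure has archived the region
      // directories and removed the table's rows from hbase:meta.
      admin.deleteTable(table);
      // Remove the snapshots that were taken against the table
      // (names as logged below).
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion");
      admin.deleteSnapshot("snapshot-testExportFileSystemStateWithSplitRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSplitRegion");
    }
  }
}

Deleting the table does not delete its HFiles outright; as the HFileArchiver records above show, they are moved under the archive directory so snapshots and backups remain restorable until the snapshots themselves are removed.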
2024-12-09T10:58:42,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-09T10:58:42,654 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733741922654"}]},"ts":"9223372036854775807"} 2024-12-09T10:58:42,661 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-12-09T10:58:42,663 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 165 msec 2024-12-09T10:58:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-09T10:58:42,863 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,863 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-09T10:58:42,905 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-09T10:58:42,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,920 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-09T10:58:42,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:42,934 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-09T10:58:42,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:43,027 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=758 (was 713) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1359318497) connection to localhost/127.0.0.1:38519 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1413 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) 
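This thread inventory, which begins at the ResourceChecker "Thread=758 (was 713)" record above and continues below, is printed after each test so leaked or hung threads can be spotted. A simplified illustration of how the same information can be gathered with the standard JDK API; this assumes nothing about HBase's ResourceChecker internals and is not its implementation:

import java.util.Map;

public class ThreadReport {
  public static void main(String[] args) {
    // Snapshot of every live thread and its current stack trace.
    Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
    System.out.println("Live threads: " + stacks.size());
    for (Map.Entry<Thread, StackTraceElement[]> e : stacks.entrySet()) {
      Thread t = e.getKey();
      // Pool threads parked waiting for work are usually harmless; a
      // non-daemon thread that outlives the test is the interesting case.
      System.out.printf("Potentially hanging thread: %s (daemon=%s, state=%s)%n",
          t.getName(), t.isDaemon(), t.getState());
      for (StackTraceElement frame : e.getValue()) {
        System.out.println("    " + frame);
      }
    }
  }
}

Comparing the thread count before and after a test, as the ResourceChecker line does, is what turns this raw dump into a leak signal: a higher "after" count points at the entries listed here.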
Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 app//org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.read(AbstractChannelHandlerContext.java:824) app//org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.read(DefaultChannelPipeline.java:953) app//org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel.read(AbstractChannel.java:289) app//org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.readIfIsAutoRead(DefaultChannelPipeline.java:1369) app//org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelReadComplete(DefaultChannelPipeline.java:1364) app//org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelReadComplete(AbstractChannelHandlerContext.java:482) app//org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelReadComplete(AbstractChannelHandlerContext.java:463) app//org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelReadComplete(DefaultChannelPipeline.java:874) app//org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:171) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/3469f9ca0af3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/3469f9ca0af3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:43032 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 31084) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/3469f9ca0af3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:42692 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38519 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:49608 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=781 (was 777) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=884 (was 444) - SystemLoadAverage LEAK? -, ProcessCount=14 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=2714 (was 7665) 2024-12-09T10:58:43,028 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=758 is superior to 500 2024-12-09T10:58:43,079 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=758, OpenFileDescriptor=781, MaxFileDescriptor=1048576, SystemLoadAverage=884, ProcessCount=14, AvailableMemoryMB=2741 2024-12-09T10:58:43,079 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=758 is superior to 500 2024-12-09T10:58:43,089 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T10:58:43,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-09T10:58:43,106 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T10:58:43,107 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-09T10:58:43,108 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T10:58:43,109 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-09T10:58:43,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741911_1087 (size=442) 2024-12-09T10:58:43,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741911_1087 (size=442) 2024-12-09T10:58:43,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741911_1087 (size=442) 2024-12-09T10:58:43,199 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0286d8488fd7b9f26d5346f68f39100c, NAME => 'testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:58:43,202 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 8699fe38cfb40bf429b0a89066c7114f, NAME => 'testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:58:43,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-09T10:58:43,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741912_1088 (size=67) 2024-12-09T10:58:43,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741912_1088 (size=67) 2024-12-09T10:58:43,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741912_1088 (size=67) 2024-12-09T10:58:43,318 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:58:43,318 DEBUG 
[RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing 0286d8488fd7b9f26d5346f68f39100c, disabling compactions & flushes 2024-12-09T10:58:43,318 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 2024-12-09T10:58:43,318 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 2024-12-09T10:58:43,318 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. after waiting 0 ms 2024-12-09T10:58:43,318 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 2024-12-09T10:58:43,318 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 2024-12-09T10:58:43,319 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0286d8488fd7b9f26d5346f68f39100c: Waiting for close lock at 1733741923318Disabling compacts and flushes for region at 1733741923318Disabling writes for close at 1733741923318Writing region close event to WAL at 1733741923318Closed at 1733741923318 2024-12-09T10:58:43,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741913_1089 (size=67) 2024-12-09T10:58:43,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741913_1089 (size=67) 2024-12-09T10:58:43,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741913_1089 (size=67) 2024-12-09T10:58:43,325 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:58:43,325 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing 8699fe38cfb40bf429b0a89066c7114f, disabling compactions & flushes 2024-12-09T10:58:43,325 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 2024-12-09T10:58:43,325 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 2024-12-09T10:58:43,325 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 
after waiting 0 ms 2024-12-09T10:58:43,325 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 2024-12-09T10:58:43,326 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 2024-12-09T10:58:43,326 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for 8699fe38cfb40bf429b0a89066c7114f: Waiting for close lock at 1733741923325Disabling compacts and flushes for region at 1733741923325Disabling writes for close at 1733741923325Writing region close event to WAL at 1733741923325Closed at 1733741923325 2024-12-09T10:58:43,338 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T10:58:43,338 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733741923338"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741923338"}]},"ts":"1733741923338"} 2024-12-09T10:58:43,338 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733741923338"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741923338"}]},"ts":"1733741923338"} 2024-12-09T10:58:43,369 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
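For reference, the table descriptor logged in the create request above maps onto the HBase Admin API roughly as below. This is a minimal sketch rather than the test's own code: the connection bootstrap and class wrapper are assumptions, and only the MOB settings (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and the single split key '1' that yields the two regions created above are taken from the log; the remaining attributes are left at their defaults.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {   // hypothetical wrapper, not part of the test
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)     // IS_MOB => 'true'
              .setMobThreshold(0L)     // MOB_THRESHOLD => '0': cells of any size go to MOB files
              .setMaxVersions(1)       // VERSIONS => '1'
              .build())
          .build();
      // One split key produces the two regions seen above: ['', '1') and ['1', '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}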
2024-12-09T10:58:43,382 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T10:58:43,382 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741923382"}]},"ts":"1733741923382"} 2024-12-09T10:58:43,390 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-09T10:58:43,391 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T10:58:43,394 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T10:58:43,394 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T10:58:43,394 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T10:58:43,394 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T10:58:43,394 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T10:58:43,394 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T10:58:43,394 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T10:58:43,395 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T10:58:43,395 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T10:58:43,395 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T10:58:43,395 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0286d8488fd7b9f26d5346f68f39100c, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8699fe38cfb40bf429b0a89066c7114f, ASSIGN}] 2024-12-09T10:58:43,402 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8699fe38cfb40bf429b0a89066c7114f, ASSIGN 2024-12-09T10:58:43,408 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0286d8488fd7b9f26d5346f68f39100c, ASSIGN 2024-12-09T10:58:43,410 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8699fe38cfb40bf429b0a89066c7114f, ASSIGN; state=OFFLINE, location=3469f9ca0af3,42349,1733741767108; forceNewPlan=false, retain=false 2024-12-09T10:58:43,412 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0286d8488fd7b9f26d5346f68f39100c, ASSIGN; state=OFFLINE, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T10:58:43,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-09T10:58:43,562 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-09T10:58:43,565 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=8699fe38cfb40bf429b0a89066c7114f, regionState=OPENING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T10:58:43,565 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=0286d8488fd7b9f26d5346f68f39100c, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:58:43,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8699fe38cfb40bf429b0a89066c7114f, ASSIGN because future has completed 2024-12-09T10:58:43,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8699fe38cfb40bf429b0a89066c7114f, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T10:58:43,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0286d8488fd7b9f26d5346f68f39100c, ASSIGN because future has completed 2024-12-09T10:58:43,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0286d8488fd7b9f26d5346f68f39100c, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T10:58:43,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-09T10:58:43,764 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 2024-12-09T10:58:43,765 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => 0286d8488fd7b9f26d5346f68f39100c, NAME => 'testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T10:58:43,765 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 2024-12-09T10:58:43,766 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 
service=AccessControlService 2024-12-09T10:58:43,766 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T10:58:43,767 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:43,767 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:58:43,767 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:43,767 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:43,765 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => 8699fe38cfb40bf429b0a89066c7114f, NAME => 'testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T10:58:43,768 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. service=AccessControlService 2024-12-09T10:58:43,768 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T10:58:43,768 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:43,769 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:58:43,769 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:43,769 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:43,781 INFO [StoreOpener-8699fe38cfb40bf429b0a89066c7114f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:43,786 INFO [StoreOpener-0286d8488fd7b9f26d5346f68f39100c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:43,794 INFO [StoreOpener-8699fe38cfb40bf429b0a89066c7114f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8699fe38cfb40bf429b0a89066c7114f columnFamilyName cf 2024-12-09T10:58:43,802 INFO [StoreOpener-0286d8488fd7b9f26d5346f68f39100c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0286d8488fd7b9f26d5346f68f39100c columnFamilyName cf 2024-12-09T10:58:43,802 DEBUG [StoreOpener-8699fe38cfb40bf429b0a89066c7114f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:58:43,806 DEBUG [StoreOpener-0286d8488fd7b9f26d5346f68f39100c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:58:43,806 INFO [StoreOpener-8699fe38cfb40bf429b0a89066c7114f-1 {}] regionserver.HStore(327): Store=8699fe38cfb40bf429b0a89066c7114f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:58:43,807 INFO [StoreOpener-0286d8488fd7b9f26d5346f68f39100c-1 {}] regionserver.HStore(327): Store=0286d8488fd7b9f26d5346f68f39100c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:58:43,807 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:43,808 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:43,810 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:43,810 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:43,822 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:43,822 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:43,823 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:43,823 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:43,825 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:43,825 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:43,825 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 
{event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:43,829 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:43,834 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T10:58:43,834 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T10:58:43,835 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened 8699fe38cfb40bf429b0a89066c7114f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68102667, jitterRate=0.014808818697929382}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T10:58:43,835 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:43,836 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for 8699fe38cfb40bf429b0a89066c7114f: Running coprocessor pre-open hook at 1733741923769Writing region info on filesystem at 1733741923769Initializing all the Stores at 1733741923772 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741923772Cleaning up temporary data from old regions at 1733741923825 (+53 ms)Running coprocessor post-open hooks at 1733741923835 (+10 ms)Region opened successfully at 1733741923836 (+1 ms) 2024-12-09T10:58:43,837 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f., pid=48, masterSystemTime=1733741923741 2024-12-09T10:58:43,837 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened 0286d8488fd7b9f26d5346f68f39100c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60913218, jitterRate=-0.09232231974601746}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T10:58:43,838 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 
0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:43,838 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for 0286d8488fd7b9f26d5346f68f39100c: Running coprocessor pre-open hook at 1733741923767Writing region info on filesystem at 1733741923767Initializing all the Stores at 1733741923773 (+6 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741923773Cleaning up temporary data from old regions at 1733741923823 (+50 ms)Running coprocessor post-open hooks at 1733741923838 (+15 ms)Region opened successfully at 1733741923838 2024-12-09T10:58:43,842 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c., pid=49, masterSystemTime=1733741923751 2024-12-09T10:58:43,844 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 2024-12-09T10:58:43,844 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 2024-12-09T10:58:43,854 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=8699fe38cfb40bf429b0a89066c7114f, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T10:58:43,858 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 2024-12-09T10:58:43,858 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 
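At this point both regions of testtb-testExportWithTargetName have been opened, one on 3469f9ca0af3,42349,1733741767108 and one on 3469f9ca0af3,33293,1733741767044. A client can confirm the assignment with a RegionLocator; a small sketch, assuming the open Connection conn from the earlier snippet (additional imports: org.apache.hadoop.hbase.HRegionLocation, org.apache.hadoop.hbase.client.RegionLocator):

try (RegionLocator locator =
         conn.getRegionLocator(TableName.valueOf("testtb-testExportWithTargetName"))) {
  // Each HRegionLocation pairs a region (name, start/end key) with its hosting server,
  // matching the regionLocation values recorded in hbase:meta above.
  for (HRegionLocation loc : locator.getAllRegionLocations()) {
    System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
  }
}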
2024-12-09T10:58:43,866 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=0286d8488fd7b9f26d5346f68f39100c, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,33293,1733741767044
2024-12-09T10:58:43,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8699fe38cfb40bf429b0a89066c7114f, server=3469f9ca0af3,42349,1733741767108 because future has completed
2024-12-09T10:58:43,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0286d8488fd7b9f26d5346f68f39100c, server=3469f9ca0af3,33293,1733741767044 because future has completed
2024-12-09T10:58:43,893 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47
2024-12-09T10:58:43,894 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure 8699fe38cfb40bf429b0a89066c7114f, server=3469f9ca0af3,42349,1733741767108 in 300 msec
2024-12-09T10:58:43,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8699fe38cfb40bf429b0a89066c7114f, ASSIGN in 499 msec
2024-12-09T10:58:43,896 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46
2024-12-09T10:58:43,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure 0286d8488fd7b9f26d5346f68f39100c, server=3469f9ca0af3,33293,1733741767044 in 309 msec
2024-12-09T10:58:43,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, resume processing ppid=45
2024-12-09T10:58:43,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0286d8488fd7b9f26d5346f68f39100c, ASSIGN in 501 msec
2024-12-09T10:58:43,902 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-09T10:58:43,903 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741923903"}]},"ts":"1733741923903"}
2024-12-09T10:58:43,907 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta
2024-12-09T10:58:43,908 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION
2024-12-09T10:58:43,908 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA
2024-12-09T10:58:43,923 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA]
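The CREATE_TABLE_POST_OPERATION step above ends with the AccessController writing an owner grant, jenkins: RWXCA, into the acl table via PermissionStorage. The same kind of table-level entry can also be produced explicitly from a client; a sketch, assuming the open Connection conn from the earlier snippet (imports: org.apache.hadoop.hbase.security.access.AccessControlClient and org.apache.hadoop.hbase.security.access.Permission; AccessControlClient.grant declares throws Throwable):

// Table-wide grant of R/W/X/C/A to user "jenkins"; family and qualifier are null,
// so the permission covers the whole table, like the entry logged above.
AccessControlClient.grant(conn,
    TableName.valueOf("testtb-testExportWithTargetName"),
    "jenkins",
    null, null,
    Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
    Permission.Action.CREATE, Permission.Action.ADMIN);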
2024-12-09T10:58:43,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:58:43,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:58:43,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:58:43,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:58:43,952 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-09T10:58:43,953 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-09T10:58:43,953 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-09T10:58:43,953 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-09T10:58:43,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 864 msec 2024-12-09T10:58:44,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-09T10:58:44,253 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-09T10:58:44,253 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T10:58:44,256 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34982, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:58:44,260 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-09T10:58:44,260 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): 
firstRegionName=testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 2024-12-09T10:58:44,260 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T10:58:44,264 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T10:58:44,274 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T10:58:44,279 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43682, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:58:44,286 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59812, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:58:44,290 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T10:58:44,297 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-09T10:58:44,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733741924300 (current time:1733741924300). 
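At this point the master has accepted a snapshot request { ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }. A minimal sketch, assuming the synchronous Admin API, of how such a FLUSH snapshot is requested from the client side; the snapshot and table names are taken from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Request a FLUSH-type snapshot; the call blocks until the master-side
      // SnapshotProcedure (seen a few entries below) reports completion.
      admin.snapshot("emptySnaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH);
    }
  }
}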
2024-12-09T10:58:44,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T10:58:44,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-09T10:58:44,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T10:58:44,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1440bf62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:58:44,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:58:44,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:58:44,303 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:58:44,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:58:44,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:58:44,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62ab47f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:58:44,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:58:44,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:58:44,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:58:44,309 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37302, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:58:44,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@83629c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:58:44,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:58:44,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:58:44,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:58:44,338 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34998, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:58:44,340 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 2024-12-09T10:58:44,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:58:44,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:58:44,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:58:44,341 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T10:58:44,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37f7f6c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:58:44,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:58:44,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:58:44,347 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:58:44,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:58:44,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:58:44,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5906579f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:58:44,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:58:44,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:58:44,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:58:44,353 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37328, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:58:44,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49073b3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:58:44,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:58:44,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:58:44,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:58:44,361 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35008, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
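The repeated ClusterIdFetcher, ConnectionRegistryRpcStubHolder, and meta-location entries around here are the setup and teardown chatter of short-lived client connections opened inside the snapshot validation path. The following is a generic sketch of the client calls that emit this pattern, not a reconstruction of that exact code path; the region-locator lookup is only an example use of the connection.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ConnectionSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Opening a Connection triggers the cluster-id fetch from the connection
    // registry; the first region lookup then fetches the hbase:meta location,
    // matching the ClusterIdFetcher / ConnectionUtils entries in the log.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportWithTargetName"))) {
      locator.getAllRegionLocations().forEach(loc ->
          System.out.println(loc.getRegion().getRegionNameAsString()
              + " on " + loc.getServerName()));
    }
    // Closing the connection produces the "Stopping rpc client" entries and the
    // AsyncConnectionImpl close call stacks seen above.
  }
}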
2024-12-09T10:58:44,364 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T10:58:44,366 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 2024-12-09T10:58:44,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:58:44,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:58:44,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:58:44,367 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-09T10:58:44,367 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:58:44,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T10:58:44,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-09T10:58:44,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-09T10:58:44,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-09T10:58:44,381 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T10:58:44,384 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T10:58:44,414 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T10:58:44,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741914_1090 (size=167) 2024-12-09T10:58:44,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741914_1090 (size=167) 2024-12-09T10:58:44,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741914_1090 (size=167) 2024-12-09T10:58:44,473 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T10:58:44,473 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0286d8488fd7b9f26d5346f68f39100c}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
8699fe38cfb40bf429b0a89066c7114f}] 2024-12-09T10:58:44,475 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:44,475 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:44,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-09T10:58:44,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-09T10:58:44,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 2024-12-09T10:58:44,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for 0286d8488fd7b9f26d5346f68f39100c: 2024-12-09T10:58:44,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. for emptySnaptb0-testExportWithTargetName completed. 2024-12-09T10:58:44,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-09T10:58:44,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:58:44,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T10:58:44,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-09T10:58:44,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 2024-12-09T10:58:44,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for 8699fe38cfb40bf429b0a89066c7114f: 2024-12-09T10:58:44,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. for emptySnaptb0-testExportWithTargetName completed. 
2024-12-09T10:58:44,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-09T10:58:44,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:58:44,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T10:58:44,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-09T10:58:44,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741915_1091 (size=70) 2024-12-09T10:58:44,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741915_1091 (size=70) 2024-12-09T10:58:44,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741915_1091 (size=70) 2024-12-09T10:58:44,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 2024-12-09T10:58:44,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-09T10:58:44,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-09T10:58:44,710 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:44,711 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:44,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741916_1092 (size=70) 2024-12-09T10:58:44,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741916_1092 (size=70) 2024-12-09T10:58:44,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741916_1092 (size=70) 2024-12-09T10:58:44,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 
2024-12-09T10:58:44,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-09T10:58:44,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-09T10:58:44,722 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:44,723 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:44,746 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0286d8488fd7b9f26d5346f68f39100c in 252 msec 2024-12-09T10:58:44,757 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=50 2024-12-09T10:58:44,757 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T10:58:44,757 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8699fe38cfb40bf429b0a89066c7114f in 253 msec 2024-12-09T10:58:44,759 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T10:58:44,770 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T10:58:44,770 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T10:58:44,770 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:58:44,779 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-09T10:58:44,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741917_1093 (size=62) 2024-12-09T10:58:44,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741917_1093 (size=62) 2024-12-09T10:58:44,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741917_1093 (size=62) 2024-12-09T10:58:44,849 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T10:58:44,849 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-09T10:58:44,851 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-09T10:58:44,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741918_1094 (size=649) 2024-12-09T10:58:45,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741918_1094 (size=649) 2024-12-09T10:58:45,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741918_1094 (size=649) 2024-12-09T10:58:45,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-09T10:58:45,029 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T10:58:45,040 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-09T10:58:45,089 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T10:58:45,090 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-09T10:58:45,094 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T10:58:45,094 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-09T10:58:45,114 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 726 msec 2024-12-09T10:58:45,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-09T10:58:45,526 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-09T10:58:45,535 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T10:58:45,542 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0001_000001 (auth:SIMPLE) from 127.0.0.1:43380 2024-12-09T10:58:45,546 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42349 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T10:58:45,552 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T10:58:45,562 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-09T10:58:45,563 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 
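The two HRegion(8528) entries above note that test rows are being written "with WAL disabled. Data may be lost in the event of a crash." That warning is emitted when a mutation carries SKIP_WAL durability; a minimal sketch follows, with a placeholder row key and value rather than the exact cells written by the test.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
      Put put = new Put(Bytes.toBytes("example-row"));            // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skipping the WAL is what makes the region server log
      // "writing data to region ... with WAL disabled. Data may be lost ...".
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}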
2024-12-09T10:58:45,563 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T10:58:45,566 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T10:58:45,578 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T10:58:45,588 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0001/container_1733741775522_0001_01_000001/launch_container.sh] 2024-12-09T10:58:45,588 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0001/container_1733741775522_0001_01_000001/container_tokens] 2024-12-09T10:58:45,588 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0001/container_1733741775522_0001_01_000001/sysfs] 2024-12-09T10:58:45,589 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T10:58:45,599 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-09T10:58:45,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733741925600 (current time:1733741925600). 
2024-12-09T10:58:45,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T10:58:45,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-09T10:58:45,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T10:58:45,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12d53144, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:58:45,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:58:45,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:58:45,604 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:58:45,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:58:45,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:58:45,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22cb2e75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:58:45,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:58:45,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:58:45,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:58:45,607 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37332, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:58:45,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51b59320, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:58:45,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:58:45,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:58:45,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:58:45,613 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35012, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:58:45,615 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 2024-12-09T10:58:45,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:58:45,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:58:45,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:58:45,616 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T10:58:45,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3779efd9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:58:45,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:58:45,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:58:45,629 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:58:45,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:58:45,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:58:45,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47764889, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:58:45,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:58:45,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:58:45,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:58:45,632 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37344, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:58:45,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@148c0843, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:58:45,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:58:45,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:58:45,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:58:45,643 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35024, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T10:58:45,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T10:58:45,651 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 2024-12-09T10:58:45,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:58:45,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:58:45,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:58:45,651 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-09T10:58:45,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T10:58:45,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-09T10:58:45,656 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T10:58:45,656 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:58:45,657 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T10:58:45,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-09T10:58:45,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-09T10:58:45,665 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T10:58:45,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741919_1095 (size=162) 2024-12-09T10:58:45,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741919_1095 (size=162) 2024-12-09T10:58:45,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741919_1095 (size=162) 2024-12-09T10:58:45,719 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T10:58:45,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0286d8488fd7b9f26d5346f68f39100c}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8699fe38cfb40bf429b0a89066c7114f}] 
2024-12-09T10:58:45,725 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:45,726 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:45,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-09T10:58:45,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-09T10:58:45,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 2024-12-09T10:58:45,882 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing 0286d8488fd7b9f26d5346f68f39100c 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-09T10:58:45,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-09T10:58:45,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 
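The second snapshot, snaptb0-testExportWithTargetName, is now flushing its regions; the test name (testExportWithTargetName) indicates this snapshot is subsequently exported under a different target name. A heavily hedged sketch of how such an export could be driven programmatically, assuming the ExportSnapshot tool can be invoked through ToolRunner and supports the -snapshot / -copy-to / -target options; the destination path is a placeholder and does not appear in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy the snapshot to another filesystem under a new name ("-target"),
    // which is the behaviour a test named testExportWithTargetName exercises.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-copy-to", "hdfs://localhost:35869/tmp/export-target",   // placeholder destination
        "-target", "testExportWithTargetName"
    });
    System.exit(rc);
  }
}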
2024-12-09T10:58:45,886 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing 8699fe38cfb40bf429b0a89066c7114f 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-09T10:58:45,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412094df597dbeff6400993e0a49fe90a0806_0286d8488fd7b9f26d5346f68f39100c is 71, key is 0d18db7e28916ef20f67a8bca4c30775/cf:q/1733741925535/Put/seqid=0 2024-12-09T10:58:45,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241209871c8cbef4e24ec9a091380c7ecdecdb_8699fe38cfb40bf429b0a89066c7114f is 71, key is 14221c24c2d5dd98b6892f6425acf971/cf:q/1733741925542/Put/seqid=0 2024-12-09T10:58:45,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741920_1096 (size=5032) 2024-12-09T10:58:45,975 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:58:45,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741920_1096 (size=5032) 2024-12-09T10:58:45,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741920_1096 (size=5032) 2024-12-09T10:58:45,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-09T10:58:45,985 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412094df597dbeff6400993e0a49fe90a0806_0286d8488fd7b9f26d5346f68f39100c to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412094df597dbeff6400993e0a49fe90a0806_0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:45,986 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/.tmp/cf/8ca9f58696384b9a986c4c2465fa4ddd, store: [table=testtb-testExportWithTargetName family=cf region=0286d8488fd7b9f26d5346f68f39100c] 2024-12-09T10:58:45,987 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/.tmp/cf/8ca9f58696384b9a986c4c2465fa4ddd is 208, key is 03ac13b8769e018a705af45547488cdea/cf:q/1733741925535/Put/seqid=0 2024-12-09T10:58:45,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741921_1097 (size=8242) 2024-12-09T10:58:45,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741921_1097 (size=8242) 2024-12-09T10:58:45,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741921_1097 (size=8242) 2024-12-09T10:58:46,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:58:46,015 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241209871c8cbef4e24ec9a091380c7ecdecdb_8699fe38cfb40bf429b0a89066c7114f to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241209871c8cbef4e24ec9a091380c7ecdecdb_8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:46,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/.tmp/cf/57cbf106448440ceb47f78dcdab18169, store: [table=testtb-testExportWithTargetName family=cf region=8699fe38cfb40bf429b0a89066c7114f] 2024-12-09T10:58:46,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/.tmp/cf/57cbf106448440ceb47f78dcdab18169 is 208, key is 116167bbbcb31895afbb664723bc44e31/cf:q/1733741925542/Put/seqid=0 2024-12-09T10:58:46,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741922_1098 (size=5706) 2024-12-09T10:58:46,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741922_1098 (size=5706) 2024-12-09T10:58:46,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741922_1098 (size=5706) 2024-12-09T10:58:46,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741923_1099 (size=15155) 2024-12-09T10:58:46,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to 
blk_1073741923_1099 (size=15155) 2024-12-09T10:58:46,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741923_1099 (size=15155) 2024-12-09T10:58:46,052 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/.tmp/cf/57cbf106448440ceb47f78dcdab18169 2024-12-09T10:58:46,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/.tmp/cf/57cbf106448440ceb47f78dcdab18169 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/cf/57cbf106448440ceb47f78dcdab18169 2024-12-09T10:58:46,082 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/cf/57cbf106448440ceb47f78dcdab18169, entries=48, sequenceid=6, filesize=14.8 K 2024-12-09T10:58:46,086 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 8699fe38cfb40bf429b0a89066c7114f in 199ms, sequenceid=6, compaction requested=false 2024-12-09T10:58:46,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for 8699fe38cfb40bf429b0a89066c7114f: 2024-12-09T10:58:46,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. for snaptb0-testExportWithTargetName completed. 2024-12-09T10:58:46,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-09T10:58:46,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:58:46,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/cf/57cbf106448440ceb47f78dcdab18169] hfiles 2024-12-09T10:58:46,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/cf/57cbf106448440ceb47f78dcdab18169 for snapshot=snaptb0-testExportWithTargetName 2024-12-09T10:58:46,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741924_1100 (size=109) 2024-12-09T10:58:46,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741924_1100 (size=109) 2024-12-09T10:58:46,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741924_1100 (size=109) 2024-12-09T10:58:46,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 
2024-12-09T10:58:46,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-09T10:58:46,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-09T10:58:46,158 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:46,158 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:46,173 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8699fe38cfb40bf429b0a89066c7114f in 444 msec 2024-12-09T10:58:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-09T10:58:46,427 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/.tmp/cf/8ca9f58696384b9a986c4c2465fa4ddd 2024-12-09T10:58:46,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/.tmp/cf/8ca9f58696384b9a986c4c2465fa4ddd as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/cf/8ca9f58696384b9a986c4c2465fa4ddd 2024-12-09T10:58:46,456 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/cf/8ca9f58696384b9a986c4c2465fa4ddd, entries=2, sequenceid=6, filesize=5.6 K 2024-12-09T10:58:46,462 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 0286d8488fd7b9f26d5346f68f39100c in 581ms, sequenceid=6, compaction requested=false 2024-12-09T10:58:46,462 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for 0286d8488fd7b9f26d5346f68f39100c: 2024-12-09T10:58:46,462 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. for snaptb0-testExportWithTargetName completed. 
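The mobdir paths and DefaultMobStoreFlusher entries above indicate that the table's cf family is MOB-enabled, so flushed cells over the MOB threshold are written to separate MOB hfiles that the snapshot also has to reference. A sketch of declaring such a family with the public descriptor builders, assuming an open Admin handle; the threshold value is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
      // Creates a two-region table whose single family stores large values as MOB files.
      static void createMobTable(Admin admin) throws IOException {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)      // cells above the threshold go to MOB hfiles under /mobdir
            .setMobThreshold(100L)    // illustrative threshold, in bytes
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
            .setColumnFamily(cf)
            .build();
        admin.createTable(table, new byte[][] { Bytes.toBytes("1") }); // one split key, two regions, as in the log
      }
    }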
2024-12-09T10:58:46,462 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-09T10:58:46,462 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:58:46,462 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/cf/8ca9f58696384b9a986c4c2465fa4ddd] hfiles 2024-12-09T10:58:46,462 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/cf/8ca9f58696384b9a986c4c2465fa4ddd for snapshot=snaptb0-testExportWithTargetName 2024-12-09T10:58:46,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741925_1101 (size=109) 2024-12-09T10:58:46,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741925_1101 (size=109) 2024-12-09T10:58:46,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741925_1101 (size=109) 2024-12-09T10:58:46,514 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 
2024-12-09T10:58:46,514 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-09T10:58:46,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-09T10:58:46,515 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:46,515 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:46,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=54, resume processing ppid=53 2024-12-09T10:58:46,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0286d8488fd7b9f26d5346f68f39100c in 795 msec 2024-12-09T10:58:46,525 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T10:58:46,526 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T10:58:46,527 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T10:58:46,527 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T10:58:46,527 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:58:46,536 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241209871c8cbef4e24ec9a091380c7ecdecdb_8699fe38cfb40bf429b0a89066c7114f, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412094df597dbeff6400993e0a49fe90a0806_0286d8488fd7b9f26d5346f68f39100c] hfiles 2024-12-09T10:58:46,536 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241209871c8cbef4e24ec9a091380c7ecdecdb_8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:58:46,536 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412094df597dbeff6400993e0a49fe90a0806_0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:58:46,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741926_1102 (size=293) 2024-12-09T10:58:46,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741926_1102 (size=293) 2024-12-09T10:58:46,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741926_1102 (size=293) 2024-12-09T10:58:46,621 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T10:58:46,621 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-09T10:58:46,626 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-09T10:58:46,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741927_1103 (size=959) 2024-12-09T10:58:46,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741927_1103 (size=959) 2024-12-09T10:58:46,695 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741927_1103 (size=959) 2024-12-09T10:58:46,710 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T10:58:46,726 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T10:58:46,727 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-09T10:58:46,743 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T10:58:46,743 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-09T10:58:46,750 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 1.0900 sec 2024-12-09T10:58:46,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-09T10:58:46,803 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-09T10:58:46,803 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741926803 2024-12-09T10:58:46,803 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35869, tgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741926803, rawTgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741926803, srcFsUri=hdfs://localhost:35869, srcDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:58:46,849 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35869, inputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:58:46,849 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): 
outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741926803, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741926803/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-09T10:58:46,852 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T10:58:46,887 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741926803/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-09T10:58:46,951 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T10:58:46,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741928_1104 (size=162) 2024-12-09T10:58:46,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741928_1104 (size=162) 2024-12-09T10:58:46,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741928_1104 (size=162) 2024-12-09T10:58:47,042 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-09T10:58:47,042 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-09T10:58:47,043 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:47,044 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T10:58:47,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741929_1105 (size=959) 2024-12-09T10:58:47,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741929_1105 (size=959) 2024-12-09T10:58:47,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741929_1105 (size=959) 2024-12-09T10:58:47,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741930_1106 (size=154) 2024-12-09T10:58:47,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741930_1106 (size=154) 2024-12-09T10:58:47,519 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741930_1106 (size=154) 2024-12-09T10:58:47,525 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:58:47,526 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:58:47,526 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:58:48,967 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-2423637381275216858.jar 2024-12-09T10:58:48,967 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:58:48,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:58:49,062 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-7036950481422921101.jar 2024-12-09T10:58:49,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:58:49,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:58:49,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:58:49,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:58:49,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:58:49,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:58:49,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T10:58:49,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T10:58:49,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T10:58:49,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T10:58:49,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T10:58:49,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T10:58:49,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T10:58:49,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T10:58:49,068 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T10:58:49,068 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T10:58:49,068 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T10:58:49,069 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T10:58:49,069 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T10:58:49,069 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T10:58:49,070 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T10:58:49,070 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T10:58:49,071 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T10:58:49,071 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T10:58:49,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741931_1107 (size=24020) 2024-12-09T10:58:49,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741931_1107 (size=24020) 2024-12-09T10:58:49,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741931_1107 (size=24020) 2024-12-09T10:58:49,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741932_1108 (size=77755) 2024-12-09T10:58:49,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741932_1108 (size=77755) 2024-12-09T10:58:49,316 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741932_1108 (size=77755) 2024-12-09T10:58:49,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741933_1109 (size=131360) 2024-12-09T10:58:49,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741933_1109 (size=131360) 2024-12-09T10:58:49,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741933_1109 (size=131360) 2024-12-09T10:58:49,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741934_1110 (size=111793) 2024-12-09T10:58:49,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741934_1110 (size=111793) 2024-12-09T10:58:49,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741934_1110 (size=111793) 2024-12-09T10:58:49,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741935_1111 (size=1832290) 2024-12-09T10:58:49,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741935_1111 (size=1832290) 2024-12-09T10:58:49,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741935_1111 (size=1832290) 2024-12-09T10:58:49,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741936_1112 (size=8360282) 2024-12-09T10:58:49,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741936_1112 (size=8360282) 2024-12-09T10:58:49,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741936_1112 (size=8360282) 2024-12-09T10:58:49,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741937_1113 (size=503880) 2024-12-09T10:58:49,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741937_1113 (size=503880) 2024-12-09T10:58:49,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741937_1113 (size=503880) 2024-12-09T10:58:50,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741938_1114 (size=322274) 2024-12-09T10:58:50,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741938_1114 (size=322274) 2024-12-09T10:58:50,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741938_1114 (size=322274) 2024-12-09T10:58:50,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741939_1115 (size=20406) 2024-12-09T10:58:50,371 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741939_1115 (size=20406) 2024-12-09T10:58:50,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741939_1115 (size=20406) 2024-12-09T10:58:50,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741940_1116 (size=45609) 2024-12-09T10:58:50,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741940_1116 (size=45609) 2024-12-09T10:58:50,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741940_1116 (size=45609) 2024-12-09T10:58:50,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741941_1117 (size=136454) 2024-12-09T10:58:50,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741941_1117 (size=136454) 2024-12-09T10:58:50,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741941_1117 (size=136454) 2024-12-09T10:58:50,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741942_1118 (size=1597136) 2024-12-09T10:58:50,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741942_1118 (size=1597136) 2024-12-09T10:58:50,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741942_1118 (size=1597136) 2024-12-09T10:58:50,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741943_1119 (size=30873) 2024-12-09T10:58:50,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741943_1119 (size=30873) 2024-12-09T10:58:50,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741943_1119 (size=30873) 2024-12-09T10:58:50,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741944_1120 (size=29229) 2024-12-09T10:58:50,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741944_1120 (size=29229) 2024-12-09T10:58:50,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741944_1120 (size=29229) 2024-12-09T10:58:50,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741945_1121 (size=903861) 2024-12-09T10:58:50,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741945_1121 (size=903861) 2024-12-09T10:58:50,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741945_1121 (size=903861) 2024-12-09T10:58:51,050 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741946_1122 (size=5175431) 2024-12-09T10:58:51,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741946_1122 (size=5175431) 2024-12-09T10:58:51,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741946_1122 (size=5175431) 2024-12-09T10:58:51,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741947_1123 (size=232881) 2024-12-09T10:58:51,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741947_1123 (size=232881) 2024-12-09T10:58:51,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741947_1123 (size=232881) 2024-12-09T10:58:51,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741948_1124 (size=1323991) 2024-12-09T10:58:51,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741948_1124 (size=1323991) 2024-12-09T10:58:51,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741948_1124 (size=1323991) 2024-12-09T10:58:51,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741949_1125 (size=4695811) 2024-12-09T10:58:51,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741949_1125 (size=4695811) 2024-12-09T10:58:51,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741949_1125 (size=4695811) 2024-12-09T10:58:51,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741950_1126 (size=1877034) 2024-12-09T10:58:51,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741950_1126 (size=1877034) 2024-12-09T10:58:51,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741950_1126 (size=1877034) 2024-12-09T10:58:52,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741951_1127 (size=6425021) 2024-12-09T10:58:52,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741951_1127 (size=6425021) 2024-12-09T10:58:52,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741951_1127 (size=6425021) 2024-12-09T10:58:52,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741952_1128 (size=217555) 2024-12-09T10:58:52,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741952_1128 (size=217555) 
2024-12-09T10:58:52,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741952_1128 (size=217555) 2024-12-09T10:58:52,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741953_1129 (size=443171) 2024-12-09T10:58:52,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741953_1129 (size=443171) 2024-12-09T10:58:52,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741953_1129 (size=443171) 2024-12-09T10:58:52,610 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T10:58:52,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741954_1130 (size=4188619) 2024-12-09T10:58:52,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741954_1130 (size=4188619) 2024-12-09T10:58:52,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741954_1130 (size=4188619) 2024-12-09T10:58:52,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741955_1131 (size=127628) 2024-12-09T10:58:52,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741955_1131 (size=127628) 2024-12-09T10:58:52,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741955_1131 (size=127628) 2024-12-09T10:58:52,791 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
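The JobResourceUploader warning and the long run of "For class ..., using jar ..." entries come from assembling the export MapReduce job: no job jar was set, and TableMapReduceUtil resolved each dependency class to a jar to ship with the job. A hedged sketch of how a driver typically covers both points; the job name and driver class are placeholders:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class JobSetupSketch {
      static Job newJob() throws IOException {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "snapshot-export-sketch");
        // Setting the job jar explicitly avoids the "No job jar file set" warning above.
        job.setJarByClass(JobSetupSketch.class);
        // Ships HBase, ZooKeeper and shaded third-party jars with the job; calls in this
        // family emit "For class ..., using jar ..." DEBUG lines like those seen above.
        TableMapReduceUtil.addDependencyJars(job);
        return job;
      }
    }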
2024-12-09T10:58:52,794 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-09T10:58:52,799 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.8 K 2024-12-09T10:58:52,799 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-09T10:58:52,799 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-12-09T10:58:52,799 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-12-09T10:58:52,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741956_1132 (size=1031) 2024-12-09T10:58:52,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741956_1132 (size=1031) 2024-12-09T10:58:52,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741956_1132 (size=1031) 2024-12-09T10:58:52,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741957_1133 (size=35) 2024-12-09T10:58:52,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741957_1133 (size=35) 2024-12-09T10:58:52,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741957_1133 (size=35) 2024-12-09T10:58:53,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741958_1134 (size=304080) 2024-12-09T10:58:53,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741958_1134 (size=304080) 2024-12-09T10:58:53,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741958_1134 (size=304080) 2024-12-09T10:58:53,044 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T10:58:53,044 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T10:58:53,567 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0002_000001 (auth:SIMPLE) from 127.0.0.1:56644 2024-12-09T10:59:02,303 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0002_000001 (auth:SIMPLE) from 127.0.0.1:53180 2024-12-09T10:59:02,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741959_1135 (size=349778) 2024-12-09T10:59:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741959_1135 (size=349778) 2024-12-09T10:59:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741959_1135 (size=349778) 2024-12-09T10:59:04,499 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T10:59:04,647 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0002_000001 (auth:SIMPLE) from 127.0.0.1:41776 2024-12-09T10:59:04,648 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0002_000001 (auth:SIMPLE) from 127.0.0.1:56712 2024-12-09T10:59:05,457 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0002_000001 (auth:SIMPLE) from 127.0.0.1:41788 2024-12-09T10:59:05,462 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0002_000001 (auth:SIMPLE) from 127.0.0.1:56728 2024-12-09T10:59:07,598 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0002_01_000006 while processing FINISH_CONTAINERS event 2024-12-09T10:59:10,562 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 0286d8488fd7b9f26d5346f68f39100c changed from -1.0 to 0.0, refreshing cache 2024-12-09T10:59:10,562 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 8699fe38cfb40bf429b0a89066c7114f changed from -1.0 to 0.0, refreshing cache 2024-12-09T10:59:13,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741960_1136 (size=15155) 2024-12-09T10:59:14,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741960_1136 (size=15155) 2024-12-09T10:59:14,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741960_1136 (size=15155) 2024-12-09T10:59:14,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741962_1138 (size=5706) 2024-12-09T10:59:14,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741962_1138 (size=5706) 2024-12-09T10:59:14,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45147 is added to blk_1073741962_1138 (size=5706) 2024-12-09T10:59:16,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741963_1139 (size=5032) 2024-12-09T10:59:16,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741963_1139 (size=5032) 2024-12-09T10:59:16,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741963_1139 (size=5032) 2024-12-09T10:59:16,933 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000005/launch_container.sh] 2024-12-09T10:59:16,933 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000005/container_tokens] 2024-12-09T10:59:16,933 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000005/sysfs] 2024-12-09T10:59:16,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741964_1140 (size=8242) 2024-12-09T10:59:17,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741964_1140 (size=8242) 2024-12-09T10:59:17,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741964_1140 (size=8242) 2024-12-09T10:59:17,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741961_1137 (size=31753) 2024-12-09T10:59:17,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741961_1137 (size=31753) 2024-12-09T10:59:17,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741961_1137 (size=31753) 2024-12-09T10:59:17,269 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000003/launch_container.sh] 2024-12-09T10:59:17,269 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000003/container_tokens] 2024-12-09T10:59:17,271 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000003/sysfs] 2024-12-09T10:59:17,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741965_1141 (size=465) 2024-12-09T10:59:17,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741965_1141 (size=465) 2024-12-09T10:59:17,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741965_1141 (size=465) 2024-12-09T10:59:17,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741966_1142 (size=31753) 2024-12-09T10:59:17,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741966_1142 (size=31753) 2024-12-09T10:59:17,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741966_1142 (size=31753) 2024-12-09T10:59:17,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741967_1143 (size=349778) 2024-12-09T10:59:17,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741967_1143 (size=349778) 2024-12-09T10:59:17,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741967_1143 (size=349778) 2024-12-09T10:59:19,503 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T10:59:19,508 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
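The entries above show the ExportSnapshot job wrapping up: the export is finalized, the exported snapshot's expiration status and integrity are verified, and "Export Completed: testExportWithTargetName" is reported. A minimal sketch of how such an export is typically driven from Java, assuming the standard ExportSnapshot tool and its documented -snapshot/-copy-to/-target options (the destination URI below is a placeholder, not the path used in this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copy snapshot "snaptb0-testExportWithTargetName" to another filesystem root,
        // storing it on the destination under the name "testExportWithTargetName".
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithTargetName",
            "-copy-to", "hdfs://<namenode>/<export-root>",   // placeholder destination
            "-target", "testExportWithTargetName"
        });
        System.exit(rc);
      }
    }

The tool copies the snapshot's HFiles with a MapReduce job, which is why the surrounding entries come from the MiniMRCluster (YARN containers, HDFS block reports) rather than from HBase itself.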
2024-12-09T10:59:19,558 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-09T10:59:19,558 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T10:59:19,560 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T10:59:19,560 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-09T10:59:19,561 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-09T10:59:19,561 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-09T10:59:19,561 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741926803/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741926803/.hbase-snapshot/testExportWithTargetName 2024-12-09T10:59:19,562 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741926803/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-09T10:59:19,562 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741926803/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-09T10:59:19,591 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-09T10:59:19,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-09T10:59:19,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-09T10:59:19,619 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741959618"}]},"ts":"1733741959618"} 2024-12-09T10:59:19,626 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-09T10:59:19,626 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-09T10:59:19,631 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-09T10:59:19,638 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0286d8488fd7b9f26d5346f68f39100c, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8699fe38cfb40bf429b0a89066c7114f, UNASSIGN}] 2024-12-09T10:59:19,640 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8699fe38cfb40bf429b0a89066c7114f, UNASSIGN 2024-12-09T10:59:19,641 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0286d8488fd7b9f26d5346f68f39100c, UNASSIGN 2024-12-09T10:59:19,644 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=8699fe38cfb40bf429b0a89066c7114f, regionState=CLOSING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T10:59:19,645 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=0286d8488fd7b9f26d5346f68f39100c, regionState=CLOSING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:59:19,654 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000002/launch_container.sh] 2024-12-09T10:59:19,654 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000002/container_tokens] 2024-12-09T10:59:19,655 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000002/sysfs] 2024-12-09T10:59:19,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8699fe38cfb40bf429b0a89066c7114f, UNASSIGN because future has completed 2024-12-09T10:59:19,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; 
TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0286d8488fd7b9f26d5346f68f39100c, UNASSIGN because future has completed 2024-12-09T10:59:19,664 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T10:59:19,664 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0286d8488fd7b9f26d5346f68f39100c, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T10:59:19,666 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T10:59:19,666 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8699fe38cfb40bf429b0a89066c7114f, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T10:59:19,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-09T10:59:19,826 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(122): Close 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:59:19,826 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T10:59:19,827 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing 0286d8488fd7b9f26d5346f68f39100c, disabling compactions & flushes 2024-12-09T10:59:19,827 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 2024-12-09T10:59:19,827 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:59:19,827 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 2024-12-09T10:59:19,827 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T10:59:19,827 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. after waiting 0 ms 2024-12-09T10:59:19,827 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 
2024-12-09T10:59:19,827 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing 8699fe38cfb40bf429b0a89066c7114f, disabling compactions & flushes 2024-12-09T10:59:19,827 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 2024-12-09T10:59:19,827 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 2024-12-09T10:59:19,828 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. after waiting 0 ms 2024-12-09T10:59:19,828 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 2024-12-09T10:59:19,876 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T10:59:19,877 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T10:59:19,878 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:59:19,878 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f. 2024-12-09T10:59:19,878 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for 8699fe38cfb40bf429b0a89066c7114f: Waiting for close lock at 1733741959827Running coprocessor pre-close hooks at 1733741959827Disabling compacts and flushes for region at 1733741959827Disabling writes for close at 1733741959828 (+1 ms)Writing region close event to WAL at 1733741959836 (+8 ms)Running coprocessor post-close hooks at 1733741959878 (+42 ms)Closed at 1733741959878 2024-12-09T10:59:19,878 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:59:19,879 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c. 
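The disable recorded above (HMaster receiving "disable testtb-testExportWithTargetName", the DisableTableProcedure with pid=56, and the per-region close subprocedures on the two region servers) corresponds to a single blocking Admin call on the client side. A minimal sketch, assuming the standard HBase client API; connection configuration is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithTargetName");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.isTableEnabled(table)) {
            // Returns once the DisableTableProcedure and its region-close
            // subprocedures have completed on the master.
            admin.disableTable(table);
          }
        }
      }
    }

The repeated "Checking to see if procedure is done pid=56" lines are the master answering the client's polls for exactly this completion.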
2024-12-09T10:59:19,879 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for 0286d8488fd7b9f26d5346f68f39100c: Waiting for close lock at 1733741959827Running coprocessor pre-close hooks at 1733741959827Disabling compacts and flushes for region at 1733741959827Disabling writes for close at 1733741959827Writing region close event to WAL at 1733741959830 (+3 ms)Running coprocessor post-close hooks at 1733741959878 (+48 ms)Closed at 1733741959878 2024-12-09T10:59:19,888 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed 8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:59:19,889 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=8699fe38cfb40bf429b0a89066c7114f, regionState=CLOSED 2024-12-09T10:59:19,904 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=0286d8488fd7b9f26d5346f68f39100c, regionState=CLOSED 2024-12-09T10:59:19,905 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed 0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:59:19,910 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8699fe38cfb40bf429b0a89066c7114f, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T10:59:19,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0286d8488fd7b9f26d5346f68f39100c, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T10:59:19,924 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-12-09T10:59:19,927 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure 0286d8488fd7b9f26d5346f68f39100c, server=3469f9ca0af3,33293,1733741767044 in 257 msec 2024-12-09T10:59:19,927 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-12-09T10:59:19,927 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0286d8488fd7b9f26d5346f68f39100c, UNASSIGN in 287 msec 2024-12-09T10:59:19,927 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure 8699fe38cfb40bf429b0a89066c7114f, server=3469f9ca0af3,42349,1733741767108 in 257 msec 2024-12-09T10:59:19,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-09T10:59:19,942 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=57 2024-12-09T10:59:19,942 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8699fe38cfb40bf429b0a89066c7114f, UNASSIGN in 289 msec 2024-12-09T10:59:19,958 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-09T10:59:19,959 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 313 msec 2024-12-09T10:59:19,971 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741959970"}]},"ts":"1733741959970"} 2024-12-09T10:59:19,990 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-09T10:59:19,990 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-09T10:59:20,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 406 msec 2024-12-09T10:59:20,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-09T10:59:20,252 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-09T10:59:20,253 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-09T10:59:20,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-09T10:59:20,256 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-09T10:59:20,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-09T10:59:20,257 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-09T10:59:20,264 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-09T10:59:20,270 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:59:20,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T10:59:20,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T10:59:20,278 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:59:20,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T10:59:20,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T10:59:20,286 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-09T10:59:20,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T10:59:20,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:20,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:20,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T10:59:20,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:20,287 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-09T10:59:20,287 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-09T10:59:20,287 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T10:59:20,289 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/recovered.edits] 2024-12-09T10:59:20,290 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-09T10:59:20,290 INFO 
[zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T10:59:20,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:20,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-09T10:59:20,298 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/recovered.edits] 2024-12-09T10:59:20,330 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/cf/57cbf106448440ceb47f78dcdab18169 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/cf/57cbf106448440ceb47f78dcdab18169 2024-12-09T10:59:20,340 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/cf/8ca9f58696384b9a986c4c2465fa4ddd to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/cf/8ca9f58696384b9a986c4c2465fa4ddd 2024-12-09T10:59:20,342 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f/recovered.edits/9.seqid 2024-12-09T10:59:20,343 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:59:20,346 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c/recovered.edits/9.seqid 2024-12-09T10:59:20,348 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithTargetName/0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:59:20,354 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-09T10:59:20,354 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-09T10:59:20,355 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-12-09T10:59:20,361 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241209871c8cbef4e24ec9a091380c7ecdecdb_8699fe38cfb40bf429b0a89066c7114f to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241209871c8cbef4e24ec9a091380c7ecdecdb_8699fe38cfb40bf429b0a89066c7114f 2024-12-09T10:59:20,363 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412094df597dbeff6400993e0a49fe90a0806_0286d8488fd7b9f26d5346f68f39100c to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412094df597dbeff6400993e0a49fe90a0806_0286d8488fd7b9f26d5346f68f39100c 2024-12-09T10:59:20,363 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-09T10:59:20,374 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-09T10:59:20,379 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-09T10:59:20,383 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-09T10:59:20,384 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-09T10:59:20,385 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 
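The entries above and immediately below complete the cleanup: the DeleteTableProcedure (pid=62) archives the region and mob HFiles under the archive directory via HFileArchiver, removes the two regions and the table state from hbase:meta, and the two test snapshots are then deleted. On the client side this boils down to a delete-table call followed by snapshot deletions; a minimal sketch, again assuming the standard Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableAndSnapshotsSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithTargetName");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The table must already be disabled; deleteTable drives the
          // DeleteTableProcedure that archives HFiles and cleans hbase:meta.
          admin.deleteTable(table);
          // Remove the snapshots created for this export test.
          admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
          admin.deleteSnapshot("snaptb0-testExportWithTargetName");
        }
      }
    }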
2024-12-09T10:59:20,385 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733741960385"}]},"ts":"9223372036854775807"} 2024-12-09T10:59:20,385 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733741960385"}]},"ts":"9223372036854775807"} 2024-12-09T10:59:20,389 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T10:59:20,389 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0286d8488fd7b9f26d5346f68f39100c, NAME => 'testtb-testExportWithTargetName,,1733741923088.0286d8488fd7b9f26d5346f68f39100c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 8699fe38cfb40bf429b0a89066c7114f, NAME => 'testtb-testExportWithTargetName,1,1733741923088.8699fe38cfb40bf429b0a89066c7114f.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T10:59:20,389 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 2024-12-09T10:59:20,390 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733741960389"}]},"ts":"9223372036854775807"} 2024-12-09T10:59:20,398 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-09T10:59:20,399 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-09T10:59:20,401 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 147 msec 2024-12-09T10:59:20,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-09T10:59:20,407 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-09T10:59:20,407 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-09T10:59:20,426 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-09T10:59:20,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-09T10:59:20,433 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-12-09T10:59:20,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-09T10:59:20,464 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=782 (was 758) Potentially hanging thread: IPC Client (1359318497) connection to localhost/127.0.0.1:36087 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_560849759_1 at /127.0.0.1:48102 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_560849759_1 at /127.0.0.1:60988 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33425 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:41154 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36907 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36087 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2213 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36907 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.getContainerPid(ContainerLaunch.java:1062) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerCleanup.run(ContainerCleanup.java:119) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) 
java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:46640 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 1695) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:53368 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=801 (was 781) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1178 (was 884) - SystemLoadAverage LEAK? -, ProcessCount=14 (was 14), AvailableMemoryMB=3810 (was 2741) - AvailableMemoryMB LEAK? - 2024-12-09T10:59:20,464 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=782 is superior to 500 2024-12-09T10:59:20,499 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000004/launch_container.sh] 2024-12-09T10:59:20,499 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000004/container_tokens] 2024-12-09T10:59:20,499 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000004/sysfs] 2024-12-09T10:59:20,504 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=782, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=1178, ProcessCount=14, AvailableMemoryMB=3810 2024-12-09T10:59:20,504 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=782 is superior to 500 2024-12-09T10:59:20,508 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T10:59:20,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T10:59:20,516 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T10:59:20,517 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-12-09T10:59:20,518 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T10:59:20,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-09T10:59:20,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741968_1144 (size=440) 2024-12-09T10:59:20,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741968_1144 (size=440) 2024-12-09T10:59:20,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741968_1144 (size=440) 2024-12-09T10:59:20,581 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 245fc14e78adb90753336d94265cf7d5, NAME => 'testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:59:20,586 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => a8b9ff6a0eb0adba92ba641ddcac9ba6, NAME => 'testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6.', STARTKEY => '1', ENDKEY => ''}, 
tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:59:20,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-09T10:59:20,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741969_1145 (size=65) 2024-12-09T10:59:20,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741969_1145 (size=65) 2024-12-09T10:59:20,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741969_1145 (size=65) 2024-12-09T10:59:20,655 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:59:20,656 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 245fc14e78adb90753336d94265cf7d5, disabling compactions & flushes 2024-12-09T10:59:20,656 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:20,656 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:20,656 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. after waiting 0 ms 2024-12-09T10:59:20,656 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:20,656 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 
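[editor's note] The master.HMaster and CreateTableProcedure entries above correspond to a client-side create-table request for 'testtb-testExportWithResetTtl' with a MOB-enabled family 'cf' and a single split key '1'. The following is a minimal, illustrative sketch of how such a request could be issued through the public HBase Admin API; it is not the test's actual code. The table name, family name, MOB settings, VERSIONS=1 and the split key are taken from the log above, while the configuration and connection boilerplate are generic assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    // Generic client setup (assumed): reads hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Family 'cf' with IS_MOB=true, MOB_THRESHOLD=0 and VERSIONS=1, matching the descriptor in the log.
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build())
          .build();
      // A single split key '1' produces the two regions ('' -> '1' and '1' -> '') created above.
      admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
    }
  }
}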
2024-12-09T10:59:20,656 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 245fc14e78adb90753336d94265cf7d5: Waiting for close lock at 1733741960655Disabling compacts and flushes for region at 1733741960655Disabling writes for close at 1733741960656 (+1 ms)Writing region close event to WAL at 1733741960656Closed at 1733741960656 2024-12-09T10:59:20,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741970_1146 (size=65) 2024-12-09T10:59:20,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741970_1146 (size=65) 2024-12-09T10:59:20,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741970_1146 (size=65) 2024-12-09T10:59:20,684 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:59:20,684 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing a8b9ff6a0eb0adba92ba641ddcac9ba6, disabling compactions & flushes 2024-12-09T10:59:20,684 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 2024-12-09T10:59:20,684 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 2024-12-09T10:59:20,684 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. after waiting 0 ms 2024-12-09T10:59:20,684 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 2024-12-09T10:59:20,684 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 
2024-12-09T10:59:20,684 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for a8b9ff6a0eb0adba92ba641ddcac9ba6: Waiting for close lock at 1733741960684Disabling compacts and flushes for region at 1733741960684Disabling writes for close at 1733741960684Writing region close event to WAL at 1733741960684Closed at 1733741960684 2024-12-09T10:59:20,694 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T10:59:20,694 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733741960694"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741960694"}]},"ts":"1733741960694"} 2024-12-09T10:59:20,694 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733741960694"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741960694"}]},"ts":"1733741960694"} 2024-12-09T10:59:20,702 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T10:59:20,706 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T10:59:20,707 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741960706"}]},"ts":"1733741960706"} 2024-12-09T10:59:20,718 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-09T10:59:20,718 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T10:59:20,725 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T10:59:20,725 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T10:59:20,725 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T10:59:20,725 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T10:59:20,725 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T10:59:20,725 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T10:59:20,725 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T10:59:20,726 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T10:59:20,726 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T10:59:20,726 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T10:59:20,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=245fc14e78adb90753336d94265cf7d5, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8b9ff6a0eb0adba92ba641ddcac9ba6, ASSIGN}] 2024-12-09T10:59:20,731 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8b9ff6a0eb0adba92ba641ddcac9ba6, ASSIGN 2024-12-09T10:59:20,731 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=245fc14e78adb90753336d94265cf7d5, ASSIGN 2024-12-09T10:59:20,735 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8b9ff6a0eb0adba92ba641ddcac9ba6, ASSIGN; state=OFFLINE, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T10:59:20,736 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=245fc14e78adb90753336d94265cf7d5, ASSIGN; state=OFFLINE, location=3469f9ca0af3,39691,1733741766880; forceNewPlan=false, retain=false 2024-12-09T10:59:20,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-09T10:59:20,890 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-09T10:59:20,891 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=245fc14e78adb90753336d94265cf7d5, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T10:59:20,891 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=a8b9ff6a0eb0adba92ba641ddcac9ba6, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:59:20,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8b9ff6a0eb0adba92ba641ddcac9ba6, ASSIGN because future has completed 2024-12-09T10:59:20,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=245fc14e78adb90753336d94265cf7d5, ASSIGN because future has completed 2024-12-09T10:59:20,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T10:59:20,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 245fc14e78adb90753336d94265cf7d5, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T10:59:21,053 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42731, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T10:59:21,057 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 2024-12-09T10:59:21,057 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => a8b9ff6a0eb0adba92ba641ddcac9ba6, NAME => 'testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T10:59:21,058 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. service=AccessControlService 2024-12-09T10:59:21,058 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T10:59:21,058 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:21,058 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:59:21,059 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:21,059 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:21,062 INFO [StoreOpener-a8b9ff6a0eb0adba92ba641ddcac9ba6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:21,062 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:21,062 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => 245fc14e78adb90753336d94265cf7d5, NAME => 'testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T10:59:21,063 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. service=AccessControlService 2024-12-09T10:59:21,063 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T10:59:21,063 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:21,063 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:59:21,063 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:21,063 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:21,064 INFO [StoreOpener-a8b9ff6a0eb0adba92ba641ddcac9ba6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a8b9ff6a0eb0adba92ba641ddcac9ba6 columnFamilyName cf 2024-12-09T10:59:21,067 DEBUG [StoreOpener-a8b9ff6a0eb0adba92ba641ddcac9ba6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:21,068 INFO [StoreOpener-245fc14e78adb90753336d94265cf7d5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:21,068 INFO [StoreOpener-a8b9ff6a0eb0adba92ba641ddcac9ba6-1 {}] regionserver.HStore(327): Store=a8b9ff6a0eb0adba92ba641ddcac9ba6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:59:21,068 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:21,069 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:21,070 INFO [StoreOpener-245fc14e78adb90753336d94265cf7d5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; 
throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 245fc14e78adb90753336d94265cf7d5 columnFamilyName cf 2024-12-09T10:59:21,070 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:21,070 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:21,070 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:21,071 DEBUG [StoreOpener-245fc14e78adb90753336d94265cf7d5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:21,073 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:21,073 INFO [StoreOpener-245fc14e78adb90753336d94265cf7d5-1 {}] regionserver.HStore(327): Store=245fc14e78adb90753336d94265cf7d5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:59:21,074 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:21,077 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T10:59:21,077 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened a8b9ff6a0eb0adba92ba641ddcac9ba6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62701642, jitterRate=-0.06567272543907166}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T10:59:21,077 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:21,078 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for a8b9ff6a0eb0adba92ba641ddcac9ba6: Running coprocessor pre-open hook at 1733741961059Writing region info on filesystem at 1733741961059Initializing all the Stores at 1733741961061 (+2 ms)Instantiating store for column 
family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741961061Cleaning up temporary data from old regions at 1733741961070 (+9 ms)Running coprocessor post-open hooks at 1733741961077 (+7 ms)Region opened successfully at 1733741961078 (+1 ms) 2024-12-09T10:59:21,078 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:21,079 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6., pid=67, masterSystemTime=1733741961051 2024-12-09T10:59:21,080 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:21,081 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:21,081 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:21,082 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 2024-12-09T10:59:21,082 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 
2024-12-09T10:59:21,083 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=a8b9ff6a0eb0adba92ba641ddcac9ba6, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:59:21,088 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:21,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T10:59:21,093 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T10:59:21,093 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened 245fc14e78adb90753336d94265cf7d5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59765099, jitterRate=-0.10943062603473663}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T10:59:21,093 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:21,093 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for 245fc14e78adb90753336d94265cf7d5: Running coprocessor pre-open hook at 1733741961063Writing region info on filesystem at 1733741961063Initializing all the Stores at 1733741961067 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741961067Cleaning up temporary data from old regions at 1733741961081 (+14 ms)Running coprocessor post-open hooks at 1733741961093 (+12 ms)Region opened successfully at 1733741961093 2024-12-09T10:59:21,095 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=65 2024-12-09T10:59:21,095 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6, server=3469f9ca0af3,33293,1733741767044 in 195 msec 2024-12-09T10:59:21,097 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8b9ff6a0eb0adba92ba641ddcac9ba6, ASSIGN in 369 msec 2024-12-09T10:59:21,098 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5., pid=66, masterSystemTime=1733741961051 2024-12-09T10:59:21,100 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:21,100 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:21,101 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=245fc14e78adb90753336d94265cf7d5, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T10:59:21,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 245fc14e78adb90753336d94265cf7d5, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T10:59:21,110 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=64 2024-12-09T10:59:21,110 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure 245fc14e78adb90753336d94265cf7d5, server=3469f9ca0af3,39691,1733741766880 in 209 msec 2024-12-09T10:59:21,112 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=64, resume processing ppid=63 2024-12-09T10:59:21,112 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=245fc14e78adb90753336d94265cf7d5, ASSIGN in 384 msec 2024-12-09T10:59:21,114 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T10:59:21,114 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741961114"}]},"ts":"1733741961114"} 2024-12-09T10:59:21,116 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-09T10:59:21,118 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T10:59:21,118 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-09T10:59:21,124 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-09T10:59:21,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:21,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:21,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:21,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:21,138 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:21,138 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:21,140 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:21,142 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:21,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-09T10:59:21,146 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 630 msec 2024-12-09T10:59:21,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-09T10:59:21,655 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-09T10:59:21,655 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:21,667 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-09T10:59:21,667 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 
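[editor's note] The HBaseTestingUtil and ClientMetaTableAccessor entries above show the test waiting for the new table and enumerating its two regions from hbase:meta. A hedged sketch of the equivalent client-side check, assuming the standard Admin and RegionLocator APIs (the table name comes from the log; the polling loop and printout are illustrative only):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class WaitForTableSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // Block until every region of the table has been assigned and opened.
      while (!admin.isTableAvailable(tn)) {
        Thread.sleep(100);
      }
      // Enumerate the regions; the log above reports two, split at row key '1'.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
      }
    }
  }
}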
2024-12-09T10:59:21,667 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T10:59:21,678 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:21,698 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:21,726 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:21,734 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-09T10:59:21,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733741961734 (current time:1733741961734). 2024-12-09T10:59:21,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T10:59:21,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-09T10:59:21,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T10:59:21,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49a76758, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:21,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:59:21,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:59:21,742 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:59:21,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:59:21,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:59:21,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37e6781a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-09T10:59:21,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:59:21,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:59:21,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:21,754 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54682, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:59:21,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b661f61, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:21,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:59:21,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:59:21,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:59:21,764 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53184, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:59:21,772 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T10:59:21,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:59:21,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:21,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:21,773 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:59:21,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12797e36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:21,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:59:21,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:59:21,779 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:59:21,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:59:21,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:59:21,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14e58e8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:21,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:59:21,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:59:21,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:21,792 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54712, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:59:21,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b0e32ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:21,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:59:21,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:59:21,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:59:21,808 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53198, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:59:21,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T10:59:21,818 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T10:59:21,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:59:21,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:21,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:21,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-09T10:59:21,820 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:59:21,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-09T10:59:21,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-09T10:59:21,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-09T10:59:21,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-09T10:59:21,842 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T10:59:21,846 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T10:59:21,856 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T10:59:21,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741971_1147 (size=161) 2024-12-09T10:59:21,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741971_1147 (size=161) 2024-12-09T10:59:21,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741971_1147 (size=161) 2024-12-09T10:59:21,882 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T10:59:21,882 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 245fc14e78adb90753336d94265cf7d5}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6}] 2024-12-09T10:59:21,884 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:21,884 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:21,942 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-09T10:59:22,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-09T10:59:22,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:22,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for 245fc14e78adb90753336d94265cf7d5: 2024-12-09T10:59:22,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-09T10:59:22,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-09T10:59:22,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:59:22,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T10:59:22,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-09T10:59:22,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 2024-12-09T10:59:22,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for a8b9ff6a0eb0adba92ba641ddcac9ba6: 2024-12-09T10:59:22,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-09T10:59:22,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-09T10:59:22,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:59:22,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T10:59:22,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741972_1148 (size=68) 2024-12-09T10:59:22,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741972_1148 (size=68) 2024-12-09T10:59:22,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:22,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-09T10:59:22,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741972_1148 (size=68) 2024-12-09T10:59:22,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741973_1149 (size=68) 2024-12-09T10:59:22,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741973_1149 (size=68) 2024-12-09T10:59:22,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741973_1149 (size=68) 2024-12-09T10:59:22,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-09T10:59:22,077 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:22,077 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:22,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 
2024-12-09T10:59:22,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-09T10:59:22,080 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 245fc14e78adb90753336d94265cf7d5 in 196 msec 2024-12-09T10:59:22,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-09T10:59:22,082 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:22,082 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:22,086 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=68 2024-12-09T10:59:22,086 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6 in 201 msec 2024-12-09T10:59:22,086 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T10:59:22,087 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T10:59:22,088 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T10:59:22,088 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T10:59:22,088 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:22,089 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-09T10:59:22,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741974_1150 (size=60) 2024-12-09T10:59:22,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741974_1150 (size=60) 2024-12-09T10:59:22,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741974_1150 (size=60) 2024-12-09T10:59:22,141 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T10:59:22,141 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-09T10:59:22,142 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-09T10:59:22,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-09T10:59:22,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741975_1151 (size=641) 2024-12-09T10:59:22,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741975_1151 (size=641) 2024-12-09T10:59:22,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741975_1151 (size=641) 2024-12-09T10:59:22,185 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T10:59:22,226 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T10:59:22,226 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-09T10:59:22,228 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T10:59:22,228 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-09T10:59:22,230 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 407 msec 2024-12-09T10:59:22,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-09T10:59:22,463 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-09T10:59:22,472 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39691 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T10:59:22,483 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T10:59:22,487 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:22,491 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-09T10:59:22,491 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 
2024-12-09T10:59:22,491 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T10:59:22,494 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:22,510 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:22,522 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:22,531 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-09T10:59:22,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733741962531 (current time:1733741962531). 2024-12-09T10:59:22,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T10:59:22,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-09T10:59:22,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T10:59:22,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4189cfdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:22,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:59:22,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:59:22,534 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:59:22,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:59:22,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:59:22,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ec19a37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-09T10:59:22,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:59:22,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:59:22,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:22,536 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54716, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:59:22,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@551d492a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:22,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:59:22,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:59:22,539 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:59:22,540 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53210, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:59:22,544 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T10:59:22,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:59:22,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:22,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:22,544 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:59:22,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6825d21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:22,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:59:22,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:59:22,546 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:59:22,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:59:22,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:59:22,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@111fbf33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:22,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:59:22,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:59:22,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:22,548 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54740, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:59:22,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7473e88f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:22,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:59:22,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:59:22,552 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:59:22,552 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53224, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:59:22,554 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T10:59:22,556 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T10:59:22,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:59:22,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:22,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:22,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-09T10:59:22,557 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:59:22,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-09T10:59:22,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-09T10:59:22,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-09T10:59:22,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-09T10:59:22,564 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T10:59:22,565 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T10:59:22,569 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T10:59:22,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741976_1152 (size=156) 2024-12-09T10:59:22,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741976_1152 (size=156) 2024-12-09T10:59:22,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741976_1152 (size=156) 2024-12-09T10:59:22,618 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T10:59:22,618 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 245fc14e78adb90753336d94265cf7d5}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6}] 2024-12-09T10:59:22,619 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:22,620 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:22,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-09T10:59:22,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-09T10:59:22,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-09T10:59:22,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 2024-12-09T10:59:22,787 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing a8b9ff6a0eb0adba92ba641ddcac9ba6 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-09T10:59:22,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:22,790 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing 245fc14e78adb90753336d94265cf7d5 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-09T10:59:22,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209fa3c30be7bab408fb1b7431fb1622fea_245fc14e78adb90753336d94265cf7d5 is 71, key is 04c7ee09480744f75a45316f6c724114/cf:q/1733741962472/Put/seqid=0 2024-12-09T10:59:22,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241209d59b334b57fb47459e229939fd580efb_a8b9ff6a0eb0adba92ba641ddcac9ba6 is 71, key is 11bd27c679d3842c5823dba2ee4909fa/cf:q/1733741962483/Put/seqid=0 2024-12-09T10:59:22,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-09T10:59:22,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741977_1153 (size=5172) 2024-12-09T10:59:22,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741977_1153 (size=5172) 2024-12-09T10:59:22,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741977_1153 (size=5172) 2024-12-09T10:59:22,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:22,934 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741978_1154 (size=8102) 2024-12-09T10:59:22,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741978_1154 (size=8102) 2024-12-09T10:59:22,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741978_1154 (size=8102) 2024-12-09T10:59:22,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:22,968 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209fa3c30be7bab408fb1b7431fb1622fea_245fc14e78adb90753336d94265cf7d5 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241209fa3c30be7bab408fb1b7431fb1622fea_245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:22,972 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241209d59b334b57fb47459e229939fd580efb_a8b9ff6a0eb0adba92ba641ddcac9ba6 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241209d59b334b57fb47459e229939fd580efb_a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:22,976 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/.tmp/cf/6421bf6d0a8049d89c04e2a01dabc206, store: [table=testtb-testExportWithResetTtl family=cf region=245fc14e78adb90753336d94265cf7d5] 2024-12-09T10:59:22,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/.tmp/cf/6421bf6d0a8049d89c04e2a01dabc206 is 206, key is 0d9be84fd7054741f47f673f7d5dfbbc8/cf:q/1733741962472/Put/seqid=0 2024-12-09T10:59:22,982 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/.tmp/cf/0ae01c40872245f980888a7a560c3d9f, store: [table=testtb-testExportWithResetTtl family=cf region=a8b9ff6a0eb0adba92ba641ddcac9ba6] 2024-12-09T10:59:22,983 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/.tmp/cf/0ae01c40872245f980888a7a560c3d9f is 206, key is 103d61490ef642c05d70835c8a3ed500d/cf:q/1733741962483/Put/seqid=0 2024-12-09T10:59:23,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741979_1155 (size=6108) 2024-12-09T10:59:23,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741979_1155 (size=6108) 2024-12-09T10:59:23,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741979_1155 (size=6108) 2024-12-09T10:59:23,022 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/.tmp/cf/6421bf6d0a8049d89c04e2a01dabc206 2024-12-09T10:59:23,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741980_1156 (size=14653) 2024-12-09T10:59:23,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741980_1156 (size=14653) 2024-12-09T10:59:23,025 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/.tmp/cf/0ae01c40872245f980888a7a560c3d9f 2024-12-09T10:59:23,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741980_1156 (size=14653) 2024-12-09T10:59:23,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/.tmp/cf/0ae01c40872245f980888a7a560c3d9f as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/cf/0ae01c40872245f980888a7a560c3d9f 2024-12-09T10:59:23,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/.tmp/cf/6421bf6d0a8049d89c04e2a01dabc206 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/cf/6421bf6d0a8049d89c04e2a01dabc206 2024-12-09T10:59:23,079 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/cf/0ae01c40872245f980888a7a560c3d9f, entries=46, sequenceid=6, filesize=14.3 K 2024-12-09T10:59:23,085 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/cf/6421bf6d0a8049d89c04e2a01dabc206, entries=4, sequenceid=6, filesize=6.0 K 2024-12-09T10:59:23,090 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for a8b9ff6a0eb0adba92ba641ddcac9ba6 in 303ms, sequenceid=6, compaction requested=false 2024-12-09T10:59:23,090 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-09T10:59:23,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for a8b9ff6a0eb0adba92ba641ddcac9ba6: 2024-12-09T10:59:23,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. for snaptb0-testExportWithResetTtl completed. 2024-12-09T10:59:23,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-09T10:59:23,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:59:23,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/cf/0ae01c40872245f980888a7a560c3d9f] hfiles 2024-12-09T10:59:23,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/cf/0ae01c40872245f980888a7a560c3d9f for snapshot=snaptb0-testExportWithResetTtl 2024-12-09T10:59:23,095 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 245fc14e78adb90753336d94265cf7d5 in 305ms, sequenceid=6, compaction requested=false 2024-12-09T10:59:23,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for 245fc14e78adb90753336d94265cf7d5: 2024-12-09T10:59:23,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. for snaptb0-testExportWithResetTtl completed. 2024-12-09T10:59:23,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-09T10:59:23,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:59:23,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/cf/6421bf6d0a8049d89c04e2a01dabc206] hfiles 2024-12-09T10:59:23,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/cf/6421bf6d0a8049d89c04e2a01dabc206 for snapshot=snaptb0-testExportWithResetTtl 2024-12-09T10:59:23,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741982_1158 (size=107) 2024-12-09T10:59:23,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:23,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-09T10:59:23,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741982_1158 (size=107) 2024-12-09T10:59:23,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741982_1158 (size=107) 2024-12-09T10:59:23,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-09T10:59:23,183 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:23,183 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:23,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-09T10:59:23,194 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 245fc14e78adb90753336d94265cf7d5 in 567 msec 2024-12-09T10:59:23,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741981_1157 (size=107) 2024-12-09T10:59:23,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741981_1157 (size=107) 2024-12-09T10:59:23,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741981_1157 (size=107) 
2024-12-09T10:59:23,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 2024-12-09T10:59:23,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-09T10:59:23,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-09T10:59:23,218 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:23,218 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:23,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-12-09T10:59:23,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6 in 603 msec 2024-12-09T10:59:23,235 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T10:59:23,237 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T10:59:23,241 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T10:59:23,241 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T10:59:23,241 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:23,245 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241209d59b334b57fb47459e229939fd580efb_a8b9ff6a0eb0adba92ba641ddcac9ba6, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241209fa3c30be7bab408fb1b7431fb1622fea_245fc14e78adb90753336d94265cf7d5] hfiles 2024-12-09T10:59:23,245 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241209d59b334b57fb47459e229939fd580efb_a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:23,245 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241209fa3c30be7bab408fb1b7431fb1622fea_245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:23,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741983_1159 (size=291) 2024-12-09T10:59:23,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741983_1159 (size=291) 2024-12-09T10:59:23,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741983_1159 (size=291) 2024-12-09T10:59:23,294 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T10:59:23,294 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-09T10:59:23,295 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-09T10:59:23,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741984_1160 (size=951) 2024-12-09T10:59:23,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741984_1160 (size=951) 2024-12-09T10:59:23,346 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741984_1160 (size=951) 2024-12-09T10:59:23,378 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T10:59:23,390 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T10:59:23,391 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-09T10:59:23,401 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T10:59:23,401 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-09T10:59:23,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 840 msec 2024-12-09T10:59:23,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-09T10:59:23,703 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-09T10:59:23,705 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T10:59:23,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-09T10:59:23,707 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl 
execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T10:59:23,708 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-09T10:59:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-09T10:59:23,709 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T10:59:23,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741985_1161 (size=433) 2024-12-09T10:59:23,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741985_1161 (size=433) 2024-12-09T10:59:23,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741985_1161 (size=433) 2024-12-09T10:59:23,737 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 83174aa3cf1695218cd5c873b31078c9, NAME => 'testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:59:23,738 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 606725660a8158e7655d9103b2bb95e4, NAME => 'testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:59:23,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741987_1163 (size=58) 2024-12-09T10:59:23,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741987_1163 (size=58) 2024-12-09T10:59:23,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741987_1163 (size=58) 2024-12-09T10:59:23,797 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741986_1162 (size=58) 2024-12-09T10:59:23,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741986_1162 (size=58) 2024-12-09T10:59:23,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741986_1162 (size=58) 2024-12-09T10:59:23,815 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:59:23,815 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 83174aa3cf1695218cd5c873b31078c9, disabling compactions & flushes 2024-12-09T10:59:23,815 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 2024-12-09T10:59:23,815 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 2024-12-09T10:59:23,815 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. after waiting 0 ms 2024-12-09T10:59:23,815 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 2024-12-09T10:59:23,815 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 
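The CreateTableProcedure entries above print the descriptor used for testExportWithResetTtl: a single 'cf' family with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', BLOOMFILTER => 'ROW', and two regions split at '1'. The sketch below shows one way such a descriptor could be built client-side; it is illustrative only and is not the test's own code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportWithResetTtl"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)       // IS_MOB => 'true'
              .setMobThreshold(0L)       // MOB_THRESHOLD => '0': every cell value goes to a MOB file
              .setMaxVersions(1)         // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)
              .build())
          .build();
      // One split key ('1') yields the two regions seen above: ['', '1') and ['1', '').
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```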
2024-12-09T10:59:23,815 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 83174aa3cf1695218cd5c873b31078c9: Waiting for close lock at 1733741963815Disabling compacts and flushes for region at 1733741963815Disabling writes for close at 1733741963815Writing region close event to WAL at 1733741963815Closed at 1733741963815 2024-12-09T10:59:23,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-09T10:59:24,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-09T10:59:24,143 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0002_000001 (auth:SIMPLE) from 127.0.0.1:36216 2024-12-09T10:59:24,155 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000001/launch_container.sh] 2024-12-09T10:59:24,155 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000001/container_tokens] 2024-12-09T10:59:24,155 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0002/container_1733741775522_0002_01_000001/sysfs] 2024-12-09T10:59:24,186 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:59:24,186 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 606725660a8158e7655d9103b2bb95e4, disabling compactions & flushes 2024-12-09T10:59:24,187 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 2024-12-09T10:59:24,187 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 2024-12-09T10:59:24,187 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. after waiting 0 ms 2024-12-09T10:59:24,187 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 
2024-12-09T10:59:24,187 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 2024-12-09T10:59:24,187 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 606725660a8158e7655d9103b2bb95e4: Waiting for close lock at 1733741964186Disabling compacts and flushes for region at 1733741964186Disabling writes for close at 1733741964187 (+1 ms)Writing region close event to WAL at 1733741964187Closed at 1733741964187 2024-12-09T10:59:24,191 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T10:59:24,192 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733741964192"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741964192"}]},"ts":"1733741964192"} 2024-12-09T10:59:24,192 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733741964192"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741964192"}]},"ts":"1733741964192"} 2024-12-09T10:59:24,196 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T10:59:24,198 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T10:59:24,198 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741964198"}]},"ts":"1733741964198"} 2024-12-09T10:59:24,201 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-09T10:59:24,201 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T10:59:24,212 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T10:59:24,213 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T10:59:24,213 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T10:59:24,213 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T10:59:24,213 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T10:59:24,213 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T10:59:24,213 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T10:59:24,213 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T10:59:24,213 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T10:59:24,213 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T10:59:24,213 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=606725660a8158e7655d9103b2bb95e4, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=83174aa3cf1695218cd5c873b31078c9, ASSIGN}] 2024-12-09T10:59:24,219 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=83174aa3cf1695218cd5c873b31078c9, ASSIGN 2024-12-09T10:59:24,219 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=606725660a8158e7655d9103b2bb95e4, ASSIGN 2024-12-09T10:59:24,221 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=83174aa3cf1695218cd5c873b31078c9, ASSIGN; state=OFFLINE, location=3469f9ca0af3,42349,1733741767108; forceNewPlan=false, retain=false 2024-12-09T10:59:24,222 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=606725660a8158e7655d9103b2bb95e4, ASSIGN; state=OFFLINE, location=3469f9ca0af3,39691,1733741766880; forceNewPlan=false, retain=false 2024-12-09T10:59:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-09T10:59:24,372 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-09T10:59:24,374 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=83174aa3cf1695218cd5c873b31078c9, regionState=OPENING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T10:59:24,374 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=606725660a8158e7655d9103b2bb95e4, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T10:59:24,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=606725660a8158e7655d9103b2bb95e4, ASSIGN because future has completed 2024-12-09T10:59:24,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 606725660a8158e7655d9103b2bb95e4, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T10:59:24,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=83174aa3cf1695218cd5c873b31078c9, ASSIGN because future has completed 2024-12-09T10:59:24,382 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 83174aa3cf1695218cd5c873b31078c9, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T10:59:24,554 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 2024-12-09T10:59:24,554 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => 606725660a8158e7655d9103b2bb95e4, NAME => 'testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T10:59:24,555 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. service=AccessControlService 2024-12-09T10:59:24,555 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T10:59:24,555 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:24,555 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:59:24,556 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:24,556 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:24,556 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 2024-12-09T10:59:24,556 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => 83174aa3cf1695218cd5c873b31078c9, NAME => 'testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T10:59:24,556 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. service=AccessControlService 2024-12-09T10:59:24,556 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T10:59:24,557 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:24,557 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:59:24,557 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:24,557 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:24,562 INFO [StoreOpener-83174aa3cf1695218cd5c873b31078c9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:24,565 INFO [StoreOpener-83174aa3cf1695218cd5c873b31078c9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83174aa3cf1695218cd5c873b31078c9 columnFamilyName cf 2024-12-09T10:59:24,569 INFO [StoreOpener-606725660a8158e7655d9103b2bb95e4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:24,570 DEBUG [StoreOpener-83174aa3cf1695218cd5c873b31078c9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:24,575 INFO [StoreOpener-83174aa3cf1695218cd5c873b31078c9-1 {}] regionserver.HStore(327): Store=83174aa3cf1695218cd5c873b31078c9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:59:24,575 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:24,576 INFO [StoreOpener-606725660a8158e7655d9103b2bb95e4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 606725660a8158e7655d9103b2bb95e4 columnFamilyName cf 2024-12-09T10:59:24,578 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:24,578 DEBUG [StoreOpener-606725660a8158e7655d9103b2bb95e4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:24,580 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:24,580 INFO [StoreOpener-606725660a8158e7655d9103b2bb95e4-1 {}] regionserver.HStore(327): Store=606725660a8158e7655d9103b2bb95e4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:59:24,580 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:24,581 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:24,588 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:24,588 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:24,589 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:24,593 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:24,594 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:24,598 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for 
83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:24,606 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:24,611 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T10:59:24,611 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened 83174aa3cf1695218cd5c873b31078c9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72031476, jitterRate=0.0733526349067688}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T10:59:24,612 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:24,612 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for 83174aa3cf1695218cd5c873b31078c9: Running coprocessor pre-open hook at 1733741964557Writing region info on filesystem at 1733741964557Initializing all the Stores at 1733741964560 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741964560Cleaning up temporary data from old regions at 1733741964588 (+28 ms)Running coprocessor post-open hooks at 1733741964612 (+24 ms)Region opened successfully at 1733741964612 2024-12-09T10:59:24,621 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9., pid=78, masterSystemTime=1733741964542 2024-12-09T10:59:24,628 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 2024-12-09T10:59:24,628 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 
2024-12-09T10:59:24,628 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T10:59:24,629 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=83174aa3cf1695218cd5c873b31078c9, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T10:59:24,629 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened 606725660a8158e7655d9103b2bb95e4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63404066, jitterRate=-0.05520579218864441}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T10:59:24,629 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:24,629 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for 606725660a8158e7655d9103b2bb95e4: Running coprocessor pre-open hook at 1733741964556Writing region info on filesystem at 1733741964556Initializing all the Stores at 1733741964565 (+9 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741964566 (+1 ms)Cleaning up temporary data from old regions at 1733741964594 (+28 ms)Running coprocessor post-open hooks at 1733741964629 (+35 ms)Region opened successfully at 1733741964629 2024-12-09T10:59:24,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 83174aa3cf1695218cd5c873b31078c9, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T10:59:24,638 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4., pid=77, masterSystemTime=1733741964536 2024-12-09T10:59:24,649 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 2024-12-09T10:59:24,649 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 
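Once the OpenRegionProcedures report back and the TransitRegionStateProcedures finish (entries around this point), the two regions of testExportWithResetTtl are online on the region servers listening on ports 39691 and 42349. A small sketch, assuming the same cluster configuration, of how a client could observe that assignment; it is not part of the test log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("testExportWithResetTtl"))) {
      // One HRegionLocation per region, e.g. the two regions assigned above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}
```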
2024-12-09T10:59:24,655 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=606725660a8158e7655d9103b2bb95e4, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T10:59:24,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 606725660a8158e7655d9103b2bb95e4, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T10:59:24,660 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=76 2024-12-09T10:59:24,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure 83174aa3cf1695218cd5c873b31078c9, server=3469f9ca0af3,42349,1733741767108 in 273 msec 2024-12-09T10:59:24,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=75 2024-12-09T10:59:24,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure 606725660a8158e7655d9103b2bb95e4, server=3469f9ca0af3,39691,1733741766880 in 283 msec 2024-12-09T10:59:24,665 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=83174aa3cf1695218cd5c873b31078c9, ASSIGN in 449 msec 2024-12-09T10:59:24,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=75, resume processing ppid=74 2024-12-09T10:59:24,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=606725660a8158e7655d9103b2bb95e4, ASSIGN in 451 msec 2024-12-09T10:59:24,669 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T10:59:24,669 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741964669"}]},"ts":"1733741964669"} 2024-12-09T10:59:24,673 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-09T10:59:24,674 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T10:59:24,675 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-09T10:59:24,681 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-09T10:59:24,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:24,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, 
quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:24,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:24,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:24,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 995 msec 2024-12-09T10:59:24,704 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:24,705 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:24,705 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:24,705 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:24,705 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:24,705 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:24,710 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:24,710 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:24,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-09T10:59:24,858 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-12-09T10:59:24,858 DEBUG [Time-limited test {}] 
hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:24,865 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-09T10:59:24,865 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 2024-12-09T10:59:24,866 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T10:59:24,874 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:24,885 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:24,893 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:24,908 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42349 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T10:59:24,912 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39691 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T10:59:24,913 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:24,921 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-09T10:59:24,921 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 
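The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash" warnings above come from puts issued with WAL durability turned off, which the test uses to load rows quickly. A minimal sketch of the client-side pattern that produces that warning; the row key, qualifier and value here are placeholders, not values from the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row1"))   // hypothetical row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL is what triggers the region server's "WAL disabled" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```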
2024-12-09T10:59:24,922 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T10:59:24,927 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:24,938 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:24,949 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T10:59:24,955 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-09T10:59:24,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733741964955 (current time:1733741964955). 2024-12-09T10:59:24,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-09T10:59:24,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T10:59:24,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cfe5762, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:24,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:59:24,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:59:24,959 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:59:24,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:59:24,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:59:24,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42735d9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:24,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:59:24,960 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:59:24,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:24,962 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54758, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:59:24,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1db5c080, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:24,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:59:24,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:59:24,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:59:24,969 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53238, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:59:24,971 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
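The snapshot request above ({ ss=snaptb-testExportWithResetTtl ... ttl=100000 }) and the earlier MiniMRCluster container cleanup entries belong to the export-with-reset-TTL scenario this test exercises: snapshots are taken, then copied to another location with the ExportSnapshot MapReduce tool. A hedged sketch of the standard export invocation follows; the destination URI is a placeholder, and the additional option the test uses to reset the TTL on export is not shown here since its exact name is not visible in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest and the referenced hfiles to another filesystem
    // via a MapReduce job (hence the MiniMRCluster/container entries in this log).
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        "-copy-to", "hdfs://dest-cluster:8020/hbase"   // placeholder destination
    });
    System.exit(rc);
  }
}
```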
2024-12-09T10:59:24,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:59:24,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:24,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:24,972 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:59:24,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46428e86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:24,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:59:24,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:59:24,985 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:59:24,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:59:24,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:59:24,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2727de71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:24,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:59:24,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:59:24,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:24,991 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54782, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:59:24,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@166a81d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:24,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:59:25,002 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:59:25,002 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:59:25,004 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53244, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:59:25,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T10:59:25,013 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T10:59:25,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:59:25,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:25,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:25,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-09T10:59:25,014 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:59:25,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
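[Editor's note] The stack trace above comes from writeAclToSnapshotDescription: before the snapshot is taken, the master reads the table's ACL entries from hbase:acl (here "entry[testExportWithResetTtl], kv [jenkins: RWXCA]") so they can be carried along with the snapshot description. A hedged sketch of inspecting the same permissions from a client follows, assuming security is enabled and that AccessControlClient.getUserPermissions accepts a table-name pattern; both are assumptions, neither is shown directly in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTablePermissions {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // List the ACL entries stored for the table; in this test the owner shows
          // up as "jenkins" with RWXCA, matching the "Read acl" line in the log.
          for (UserPermission perm :
              AccessControlClient.getUserPermissions(conn, "testExportWithResetTtl")) {
            System.out.println(perm);
          }
        }
      }
    }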
2024-12-09T10:59:25,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-09T10:59:25,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-09T10:59:25,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-09T10:59:25,025 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-09T10:59:25,030 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T10:59:25,039 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T10:59:25,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741988_1164 (size=143) 2024-12-09T10:59:25,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741988_1164 (size=143) 2024-12-09T10:59:25,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741988_1164 (size=143) 2024-12-09T10:59:25,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-09T10:59:25,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-09T10:59:25,458 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T10:59:25,498 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T10:59:25,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 606725660a8158e7655d9103b2bb95e4}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83174aa3cf1695218cd5c873b31078c9}] 
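[Editor's note] The repeated master.MasterRpcServices(1377) "Checking to see if procedure is done pid=79" entries are the client polling the master for completion of the snapshot procedure stored as pid=79, which a blocking Admin.snapshot call typically does internally. A minimal sketch of performing the same check explicitly, assuming the Admin handle and SnapshotDescription from the earlier sketch; the 500 ms poll interval is arbitrary.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public final class WaitForSnapshot {
      // Polls the master until the given snapshot is reported finished; this mirrors
      // the "Checking to see if procedure is done" round trips visible in the log.
      static void await(Admin admin, SnapshotDescription desc) throws Exception {
        while (!admin.isSnapshotFinished(desc)) {
          TimeUnit.MILLISECONDS.sleep(500);
        }
      }
    }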
2024-12-09T10:59:25,501 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:25,501 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-09T10:59:25,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-09T10:59:25,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 2024-12-09T10:59:25,654 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing 83174aa3cf1695218cd5c873b31078c9 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-09T10:59:25,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-09T10:59:25,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 
2024-12-09T10:59:25,660 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing 606725660a8158e7655d9103b2bb95e4 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-09T10:59:25,709 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209bf5afb3da91e4e9d936c30d6babd15e6_606725660a8158e7655d9103b2bb95e4 is 71, key is 03264b413753a8894574420f0755b516/cf:q/1733741964912/Put/seqid=0 2024-12-09T10:59:25,718 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120922d3bb9db6a548e6995c8dce3e5eb375_83174aa3cf1695218cd5c873b31078c9 is 71, key is 19371ad129b0914a30f7f587311ba468/cf:q/1733741964908/Put/seqid=0 2024-12-09T10:59:25,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741990_1166 (size=8172) 2024-12-09T10:59:25,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741990_1166 (size=8172) 2024-12-09T10:59:25,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741990_1166 (size=8172) 2024-12-09T10:59:25,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741989_1165 (size=5102) 2024-12-09T10:59:25,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741989_1165 (size=5102) 2024-12-09T10:59:25,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741989_1165 (size=5102) 2024-12-09T10:59:25,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:25,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:25,747 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209bf5afb3da91e4e9d936c30d6babd15e6_606725660a8158e7655d9103b2bb95e4 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241209bf5afb3da91e4e9d936c30d6babd15e6_606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:25,749 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/.tmp/cf/b9881f09ce274684aa2af3aeb1d3c715, store: [table=testExportWithResetTtl family=cf region=606725660a8158e7655d9103b2bb95e4] 2024-12-09T10:59:25,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/.tmp/cf/b9881f09ce274684aa2af3aeb1d3c715 is 199, key is 0c5ca7a75efb9a3347aee3b3684fa7081/cf:q/1733741964912/Put/seqid=0 2024-12-09T10:59:25,750 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120922d3bb9db6a548e6995c8dce3e5eb375_83174aa3cf1695218cd5c873b31078c9 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120922d3bb9db6a548e6995c8dce3e5eb375_83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:25,752 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/.tmp/cf/b9dfdb4efc0d4f3d848d28382a8b73ca, store: [table=testExportWithResetTtl family=cf region=83174aa3cf1695218cd5c873b31078c9] 2024-12-09T10:59:25,752 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/.tmp/cf/b9dfdb4efc0d4f3d848d28382a8b73ca is 199, key is 172d83d8f40ac72eff01638f32c67f7cf/cf:q/1733741964908/Put/seqid=0 2024-12-09T10:59:25,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741991_1167 (size=14519) 2024-12-09T10:59:25,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741991_1167 (size=14519) 2024-12-09T10:59:25,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741991_1167 (size=14519) 2024-12-09T10:59:25,805 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/.tmp/cf/b9dfdb4efc0d4f3d848d28382a8b73ca 2024-12-09T10:59:25,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741992_1168 (size=5878) 
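[Editor's note] The HMobStore / DefaultMobStoreFlusher entries and the mobdir/... paths above indicate that the table's cf family is MOB-enabled: cells above the MOB threshold are flushed into separate MOB files under mobdir, while the regular store file keeps only references to them. A sketch of how such a family could be declared with the 2.x/3.x descriptor builders follows; the threshold value is purely illustrative and not the one used by this test.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateMobTable {
      // Creates a table whose 'cf' family stores large cells as MOB files, which is
      // what produces the mobdir/... flush paths seen in the log.
      static void create(Admin admin) throws Exception {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)
            .setMobThreshold(16)   // illustrative threshold in bytes, not the test's value
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportWithResetTtl"))
            .setColumnFamily(cf)
            .build();
        admin.createTable(table);
      }
    }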
2024-12-09T10:59:25,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741992_1168 (size=5878) 2024-12-09T10:59:25,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741992_1168 (size=5878) 2024-12-09T10:59:25,814 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/.tmp/cf/b9881f09ce274684aa2af3aeb1d3c715 2024-12-09T10:59:25,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/.tmp/cf/b9dfdb4efc0d4f3d848d28382a8b73ca as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/cf/b9dfdb4efc0d4f3d848d28382a8b73ca 2024-12-09T10:59:25,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/.tmp/cf/b9881f09ce274684aa2af3aeb1d3c715 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/cf/b9881f09ce274684aa2af3aeb1d3c715 2024-12-09T10:59:25,859 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/cf/b9881f09ce274684aa2af3aeb1d3c715, entries=3, sequenceid=5, filesize=5.7 K 2024-12-09T10:59:25,863 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 606725660a8158e7655d9103b2bb95e4 in 203ms, sequenceid=5, compaction requested=false 2024-12-09T10:59:25,863 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-09T10:59:25,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for 606725660a8158e7655d9103b2bb95e4: 2024-12-09T10:59:25,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. for snaptb-testExportWithResetTtl completed. 
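[Editor's note] The "Flushing ..." / "Finished flush of dataSize ..." pairs above show each region's memstore being written out to a new store file before the snapshot references it; a FLUSH-type snapshot triggers this per region automatically. For comparison, the same flush can be requested manually through the Admin API; a minimal sketch, assuming an open Admin handle.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class FlushTable {
      // A FLUSH-type snapshot performs the equivalent of this per region, as seen in
      // the "Flushing ..." / "Finished flush ..." entries above.
      static void flush(Admin admin) throws Exception {
        admin.flush(TableName.valueOf("testExportWithResetTtl"));
      }
    }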
2024-12-09T10:59:25,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-09T10:59:25,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:59:25,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/cf/b9881f09ce274684aa2af3aeb1d3c715] hfiles 2024-12-09T10:59:25,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/cf/b9881f09ce274684aa2af3aeb1d3c715 for snapshot=snaptb-testExportWithResetTtl 2024-12-09T10:59:25,866 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/cf/b9dfdb4efc0d4f3d848d28382a8b73ca, entries=47, sequenceid=5, filesize=14.2 K 2024-12-09T10:59:25,871 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 83174aa3cf1695218cd5c873b31078c9 in 217ms, sequenceid=5, compaction requested=false 2024-12-09T10:59:25,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for 83174aa3cf1695218cd5c873b31078c9: 2024-12-09T10:59:25,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. for snaptb-testExportWithResetTtl completed. 2024-12-09T10:59:25,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-09T10:59:25,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:59:25,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/cf/b9dfdb4efc0d4f3d848d28382a8b73ca] hfiles 2024-12-09T10:59:25,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/cf/b9dfdb4efc0d4f3d848d28382a8b73ca for snapshot=snaptb-testExportWithResetTtl 2024-12-09T10:59:25,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741994_1170 (size=100) 2024-12-09T10:59:25,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741993_1169 (size=100) 2024-12-09T10:59:25,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741994_1170 (size=100) 2024-12-09T10:59:25,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741993_1169 (size=100) 2024-12-09T10:59:25,902 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 
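[Editor's note] At this point both regions have recorded their region-info and HFile references in the snapshot manifest; the MOB region and the consolidation into a single manifest follow a little further down. Once the procedure completes, the snapshot is visible to clients; a minimal sketch of listing it, assuming the standard Admin API.

    import java.util.List;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public final class ListSnapshots {
      // Prints the snapshots known to the master; after this test's procedure
      // finishes, "snaptb-testExportWithResetTtl" should appear in the list.
      static void list(Admin admin) throws Exception {
        List<SnapshotDescription> snapshots = admin.listSnapshots();
        for (SnapshotDescription sd : snapshots) {
          System.out.println(sd.getName());
        }
      }
    }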
2024-12-09T10:59:25,902 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-09T10:59:25,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-09T10:59:25,903 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:25,903 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:25,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741993_1169 (size=100) 2024-12-09T10:59:25,919 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 83174aa3cf1695218cd5c873b31078c9 in 411 msec 2024-12-09T10:59:25,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741994_1170 (size=100) 2024-12-09T10:59:25,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 2024-12-09T10:59:25,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-09T10:59:25,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-09T10:59:25,924 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:25,925 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:25,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=79 2024-12-09T10:59:25,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 606725660a8158e7655d9103b2bb95e4 in 429 msec 2024-12-09T10:59:25,945 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T10:59:25,950 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T10:59:25,954 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): 
Storing region-info for snapshot. 2024-12-09T10:59:25,954 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T10:59:25,954 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:25,963 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120922d3bb9db6a548e6995c8dce3e5eb375_83174aa3cf1695218cd5c873b31078c9, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241209bf5afb3da91e4e9d936c30d6babd15e6_606725660a8158e7655d9103b2bb95e4] hfiles 2024-12-09T10:59:25,963 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120922d3bb9db6a548e6995c8dce3e5eb375_83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:25,964 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241209bf5afb3da91e4e9d936c30d6babd15e6_606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:26,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741995_1171 (size=284) 2024-12-09T10:59:26,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741995_1171 (size=284) 2024-12-09T10:59:26,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741995_1171 (size=284) 2024-12-09T10:59:26,023 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T10:59:26,023 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-09T10:59:26,025 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-09T10:59:26,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741996_1172 (size=923) 2024-12-09T10:59:26,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741996_1172 (size=923) 2024-12-09T10:59:26,079 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741996_1172 (size=923) 2024-12-09T10:59:26,094 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T10:59:26,114 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T10:59:26,115 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-09T10:59:26,119 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T10:59:26,119 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-09T10:59:26,122 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 1.1040 sec 2024-12-09T10:59:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-09T10:59:26,153 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-09T10:59:26,180 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741966180 2024-12-09T10:59:26,180 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35869, tgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741966180, rawTgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741966180, srcFsUri=hdfs://localhost:35869, srcDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:59:26,241 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35869, inputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:59:26,241 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins 
(auth:SIMPLE)]], outputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741966180, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741966180/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-09T10:59:26,246 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T10:59:26,255 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741966180/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-09T10:59:26,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741997_1173 (size=143) 2024-12-09T10:59:26,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741997_1173 (size=143) 2024-12-09T10:59:26,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741997_1173 (size=143) 2024-12-09T10:59:26,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741998_1174 (size=923) 2024-12-09T10:59:26,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741998_1174 (size=923) 2024-12-09T10:59:26,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741998_1174 (size=923) 2024-12-09T10:59:26,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-09T10:59:26,469 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-09T10:59:26,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-09T10:59:26,470 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-09T10:59:26,471 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-09T10:59:26,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741999_1175 (size=141) 2024-12-09T10:59:26,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741999_1175 (size=141) 2024-12-09T10:59:26,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45147 is added to blk_1073741999_1175 (size=141) 2024-12-09T10:59:26,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:59:26,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:59:26,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:59:28,247 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-16120632138535106619.jar 2024-12-09T10:59:28,248 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:59:28,248 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:59:28,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-8754035790675818715.jar 2024-12-09T10:59:28,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:59:28,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:59:28,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:59:28,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:59:28,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 
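[Editor's note] From here the test switches to exporting the snapshot: ExportSnapshot resolves the source and target filesystems, verifies the snapshot, copies its manifest into .hbase-snapshot/.tmp on the destination, and then sets up a MapReduce copy job; the TableMapReduceUtil "For class ..., using jar ..." entries are that job bundling its dependency jars. A hedged sketch of driving the same tool programmatically follows; the argument names follow the HBase reference guide, the destination URI is a placeholder, and the TTL-reset option implied by the test name is treated as an assumption and left out.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public final class ExportSnapshotDriver {
      public static void main(String[] args) throws Exception {
        // Roughly equivalent to:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb-testExportWithResetTtl -copy-to <dest> -mappers 2
        // The destination below is a placeholder; the test writes to an
        // export-test/export-<timestamp> directory on the same mini HDFS.
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://namenode:8020/hbase-export",
            "-mappers", "2"
            // The test name (testExportWithResetTtl) suggests an option to reset the
            // snapshot TTL on export; the exact flag is not shown in this log, so it
            // is omitted rather than guessed.
        });
        System.exit(rc);
      }
    }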
2024-12-09T10:59:28,351 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T10:59:28,351 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T10:59:28,351 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T10:59:28,352 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T10:59:28,352 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T10:59:28,352 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T10:59:28,353 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T10:59:28,353 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T10:59:28,353 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T10:59:28,354 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T10:59:28,354 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T10:59:28,354 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T10:59:28,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T10:59:28,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T10:59:28,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T10:59:28,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T10:59:28,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T10:59:28,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T10:59:28,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T10:59:28,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742000_1176 (size=24020) 2024-12-09T10:59:28,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742000_1176 (size=24020) 2024-12-09T10:59:28,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742000_1176 (size=24020) 2024-12-09T10:59:28,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742001_1177 (size=77755) 2024-12-09T10:59:28,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742001_1177 (size=77755) 2024-12-09T10:59:28,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742001_1177 (size=77755) 2024-12-09T10:59:28,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to 
blk_1073742002_1178 (size=131360) 2024-12-09T10:59:28,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742002_1178 (size=131360) 2024-12-09T10:59:28,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742002_1178 (size=131360) 2024-12-09T10:59:28,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742003_1179 (size=111793) 2024-12-09T10:59:28,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742003_1179 (size=111793) 2024-12-09T10:59:28,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742003_1179 (size=111793) 2024-12-09T10:59:28,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742004_1180 (size=1832290) 2024-12-09T10:59:28,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742004_1180 (size=1832290) 2024-12-09T10:59:28,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742004_1180 (size=1832290) 2024-12-09T10:59:29,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742005_1181 (size=8360282) 2024-12-09T10:59:29,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742005_1181 (size=8360282) 2024-12-09T10:59:29,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742005_1181 (size=8360282) 2024-12-09T10:59:29,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742006_1182 (size=503880) 2024-12-09T10:59:29,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742006_1182 (size=503880) 2024-12-09T10:59:29,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742006_1182 (size=503880) 2024-12-09T10:59:29,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742007_1183 (size=322274) 2024-12-09T10:59:29,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742007_1183 (size=322274) 2024-12-09T10:59:29,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742007_1183 (size=322274) 2024-12-09T10:59:29,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742008_1184 (size=20406) 2024-12-09T10:59:29,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742008_1184 (size=20406) 2024-12-09T10:59:29,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is 
added to blk_1073742008_1184 (size=20406) 2024-12-09T10:59:29,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742009_1185 (size=443171) 2024-12-09T10:59:29,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742009_1185 (size=443171) 2024-12-09T10:59:29,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742009_1185 (size=443171) 2024-12-09T10:59:29,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742010_1186 (size=45609) 2024-12-09T10:59:29,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742010_1186 (size=45609) 2024-12-09T10:59:29,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742010_1186 (size=45609) 2024-12-09T10:59:29,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742011_1187 (size=136454) 2024-12-09T10:59:29,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742011_1187 (size=136454) 2024-12-09T10:59:29,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742011_1187 (size=136454) 2024-12-09T10:59:29,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742012_1188 (size=1597136) 2024-12-09T10:59:29,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742012_1188 (size=1597136) 2024-12-09T10:59:29,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742012_1188 (size=1597136) 2024-12-09T10:59:29,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742013_1189 (size=6425021) 2024-12-09T10:59:29,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742013_1189 (size=6425021) 2024-12-09T10:59:29,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742013_1189 (size=6425021) 2024-12-09T10:59:30,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742014_1190 (size=30873) 2024-12-09T10:59:30,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742014_1190 (size=30873) 2024-12-09T10:59:30,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742014_1190 (size=30873) 2024-12-09T10:59:30,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742015_1191 (size=29229) 2024-12-09T10:59:30,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 
is added to blk_1073742015_1191 (size=29229) 2024-12-09T10:59:30,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742015_1191 (size=29229) 2024-12-09T10:59:30,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742016_1192 (size=903861) 2024-12-09T10:59:30,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742016_1192 (size=903861) 2024-12-09T10:59:30,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742016_1192 (size=903861) 2024-12-09T10:59:30,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742017_1193 (size=5175431) 2024-12-09T10:59:30,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742017_1193 (size=5175431) 2024-12-09T10:59:30,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742017_1193 (size=5175431) 2024-12-09T10:59:30,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742018_1194 (size=232881) 2024-12-09T10:59:30,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742018_1194 (size=232881) 2024-12-09T10:59:30,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742018_1194 (size=232881) 2024-12-09T10:59:30,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742019_1195 (size=1323991) 2024-12-09T10:59:30,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742019_1195 (size=1323991) 2024-12-09T10:59:30,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742019_1195 (size=1323991) 2024-12-09T10:59:30,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742020_1196 (size=4695811) 2024-12-09T10:59:30,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742020_1196 (size=4695811) 2024-12-09T10:59:30,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742020_1196 (size=4695811) 2024-12-09T10:59:30,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742021_1197 (size=1877034) 2024-12-09T10:59:30,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742021_1197 (size=1877034) 2024-12-09T10:59:30,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742021_1197 (size=1877034) 2024-12-09T10:59:30,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44813 is added to blk_1073742022_1198 (size=217555) 2024-12-09T10:59:30,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742022_1198 (size=217555) 2024-12-09T10:59:30,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742022_1198 (size=217555) 2024-12-09T10:59:30,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742023_1199 (size=4188619) 2024-12-09T10:59:30,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742023_1199 (size=4188619) 2024-12-09T10:59:30,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742023_1199 (size=4188619) 2024-12-09T10:59:31,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742024_1200 (size=127628) 2024-12-09T10:59:31,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742024_1200 (size=127628) 2024-12-09T10:59:31,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742024_1200 (size=127628) 2024-12-09T10:59:31,231 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-09T10:59:31,237 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-09T10:59:31,240 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.2 K 2024-12-09T10:59:31,240 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-09T10:59:31,240 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.7 K 2024-12-09T10:59:31,240 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-09T10:59:31,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742025_1201 (size=995) 2024-12-09T10:59:31,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742025_1201 (size=995) 2024-12-09T10:59:31,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742025_1201 (size=995) 2024-12-09T10:59:31,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742026_1202 (size=35) 2024-12-09T10:59:31,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742026_1202 (size=35) 2024-12-09T10:59:31,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742026_1202 (size=35) 2024-12-09T10:59:31,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742027_1203 (size=304073) 2024-12-09T10:59:31,317 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742027_1203 (size=304073) 2024-12-09T10:59:31,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742027_1203 (size=304073) 2024-12-09T10:59:31,358 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T10:59:31,358 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T10:59:31,974 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T10:59:32,120 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0003_000001 (auth:SIMPLE) from 127.0.0.1:54646 2024-12-09T10:59:34,500 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T10:59:40,018 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0003_000001 (auth:SIMPLE) from 127.0.0.1:57818 2024-12-09T10:59:40,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742028_1204 (size=349771) 2024-12-09T10:59:40,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742028_1204 (size=349771) 2024-12-09T10:59:40,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742028_1204 (size=349771) 2024-12-09T10:59:42,471 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0003_000001 (auth:SIMPLE) from 127.0.0.1:58824 2024-12-09T10:59:42,472 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0003_000001 (auth:SIMPLE) from 127.0.0.1:53506 2024-12-09T10:59:42,476 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0003_000001 (auth:SIMPLE) from 127.0.0.1:58826 2024-12-09T10:59:43,191 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0003_000001 (auth:SIMPLE) from 127.0.0.1:53522 2024-12-09T10:59:45,250 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0003_01_000006 while processing FINISH_CONTAINERS event 2024-12-09T10:59:52,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742029_1205 (size=14519) 2024-12-09T10:59:52,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742029_1205 (size=14519) 2024-12-09T10:59:52,374 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742029_1205 (size=14519) 2024-12-09T10:59:52,823 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000002/launch_container.sh] 2024-12-09T10:59:52,823 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000002/container_tokens] 2024-12-09T10:59:52,823 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000002/sysfs] 2024-12-09T10:59:52,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742031_1207 (size=5878) 2024-12-09T10:59:52,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742031_1207 (size=5878) 2024-12-09T10:59:52,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742031_1207 (size=5878) 2024-12-09T10:59:55,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742032_1208 (size=8172) 2024-12-09T10:59:55,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742032_1208 (size=8172) 2024-12-09T10:59:55,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742032_1208 (size=8172) 2024-12-09T10:59:55,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742033_1209 (size=5102) 2024-12-09T10:59:55,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742033_1209 (size=5102) 2024-12-09T10:59:55,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742033_1209 (size=5102) 2024-12-09T10:59:55,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742030_1206 (size=31708) 2024-12-09T10:59:55,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742030_1206 (size=31708) 2024-12-09T10:59:55,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742030_1206 (size=31708) 2024-12-09T10:59:55,828 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742034_1210 (size=462) 2024-12-09T10:59:55,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742034_1210 (size=462) 2024-12-09T10:59:55,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742034_1210 (size=462) 2024-12-09T10:59:55,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742035_1211 (size=31708) 2024-12-09T10:59:55,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742035_1211 (size=31708) 2024-12-09T10:59:55,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742035_1211 (size=31708) 2024-12-09T10:59:55,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742036_1212 (size=349771) 2024-12-09T10:59:55,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742036_1212 (size=349771) 2024-12-09T10:59:55,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742036_1212 (size=349771) 2024-12-09T10:59:56,003 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0003_000001 (auth:SIMPLE) from 127.0.0.1:41388 2024-12-09T10:59:56,017 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0003_000001 (auth:SIMPLE) from 127.0.0.1:41394 2024-12-09T10:59:56,046 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_1/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000005/launch_container.sh] 2024-12-09T10:59:56,047 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_1/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000005/container_tokens] 2024-12-09T10:59:56,047 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_1/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000005/sysfs] 2024-12-09T10:59:57,763 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T10:59:57,778 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
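Note: the "Finalize the Snapshot Export" and verification entries above come from the stock org.apache.hadoop.hbase.snapshot.ExportSnapshot MapReduce tool. A minimal sketch of an equivalent standalone invocation follows; the snapshot name is taken from the log, while the class name ExportSnapshotSketch, the destination URI, and the flag spelling (taken from the HBase reference guide) are assumptions rather than this test's actual arguments.

    // Hedged sketch, not the test's actual invocation: export the snapshot named in the
    // log to another HDFS location using the stock ExportSnapshot tool.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
            new String[] {
                "-snapshot", "snaptb-testExportWithResetTtl",
                "-copy-to", "hdfs://backup-cluster:8020/hbase" // placeholder destination
            });
        System.exit(rc);
      }
    }

On a real cluster the same tool is more commonly launched through the hbase command line rather than a custom main class; the test harness drives it programmatically, which is why its output appears inline in this log.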
2024-12-09T10:59:57,802 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl
2024-12-09T10:59:57,802 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot
2024-12-09T10:59:57,803 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state
2024-12-09T10:59:57,803 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb-testExportWithResetTtl
2024-12-09T10:59:57,804 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo
2024-12-09T10:59:57,804 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest
2024-12-09T10:59:57,804 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741966180/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741966180/.hbase-snapshot/snaptb-testExportWithResetTtl
2024-12-09T10:59:57,805 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741966180/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo
2024-12-09T10:59:57,805 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733741966180/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest
2024-12-09T10:59:57,823 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl
2024-12-09T10:59:57,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl
2024-12-09T10:59:57,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82
2024-12-09T10:59:57,832 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741997832"}]},"ts":"1733741997832"}
2024-12-09T10:59:57,842 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta
2024-12-09T10:59:57,842 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING
2024-12-09T10:59:57,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83,
ppid=82, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-09T10:59:57,847 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=606725660a8158e7655d9103b2bb95e4, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=83174aa3cf1695218cd5c873b31078c9, UNASSIGN}] 2024-12-09T10:59:57,848 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=83174aa3cf1695218cd5c873b31078c9, UNASSIGN 2024-12-09T10:59:57,848 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=606725660a8158e7655d9103b2bb95e4, UNASSIGN 2024-12-09T10:59:57,849 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=606725660a8158e7655d9103b2bb95e4, regionState=CLOSING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T10:59:57,849 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=83174aa3cf1695218cd5c873b31078c9, regionState=CLOSING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T10:59:57,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=606725660a8158e7655d9103b2bb95e4, UNASSIGN because future has completed 2024-12-09T10:59:57,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=83174aa3cf1695218cd5c873b31078c9, UNASSIGN because future has completed 2024-12-09T10:59:57,857 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T10:59:57,857 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 606725660a8158e7655d9103b2bb95e4, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T10:59:57,862 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T10:59:57,862 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 83174aa3cf1695218cd5c873b31078c9, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T10:59:57,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-09T10:59:58,016 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:58,016 DEBUG 
[RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T10:59:58,017 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 606725660a8158e7655d9103b2bb95e4, disabling compactions & flushes 2024-12-09T10:59:58,017 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 2024-12-09T10:59:58,017 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 2024-12-09T10:59:58,017 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. after waiting 0 ms 2024-12-09T10:59:58,017 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 2024-12-09T10:59:58,019 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:58,019 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T10:59:58,019 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing 83174aa3cf1695218cd5c873b31078c9, disabling compactions & flushes 2024-12-09T10:59:58,019 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 2024-12-09T10:59:58,019 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 2024-12-09T10:59:58,019 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. after waiting 0 ms 2024-12-09T10:59:58,019 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 
2024-12-09T10:59:58,046 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T10:59:58,047 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:59:58,047 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4. 2024-12-09T10:59:58,047 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 606725660a8158e7655d9103b2bb95e4: Waiting for close lock at 1733741998016Running coprocessor pre-close hooks at 1733741998016Disabling compacts and flushes for region at 1733741998016Disabling writes for close at 1733741998017 (+1 ms)Writing region close event to WAL at 1733741998029 (+12 ms)Running coprocessor post-close hooks at 1733741998047 (+18 ms)Closed at 1733741998047 2024-12-09T10:59:58,051 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:58,051 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=606725660a8158e7655d9103b2bb95e4, regionState=CLOSED 2024-12-09T10:59:58,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 606725660a8158e7655d9103b2bb95e4, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T10:59:58,057 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-12-09T10:59:58,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure 606725660a8158e7655d9103b2bb95e4, server=3469f9ca0af3,39691,1733741766880 in 198 msec 2024-12-09T10:59:58,058 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T10:59:58,059 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=606725660a8158e7655d9103b2bb95e4, UNASSIGN in 212 msec 2024-12-09T10:59:58,059 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:59:58,059 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9. 
2024-12-09T10:59:58,059 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for 83174aa3cf1695218cd5c873b31078c9: Waiting for close lock at 1733741998019Running coprocessor pre-close hooks at 1733741998019Disabling compacts and flushes for region at 1733741998019Disabling writes for close at 1733741998019Writing region close event to WAL at 1733741998030 (+11 ms)Running coprocessor post-close hooks at 1733741998059 (+29 ms)Closed at 1733741998059 2024-12-09T10:59:58,062 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed 83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:58,063 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=83174aa3cf1695218cd5c873b31078c9, regionState=CLOSED 2024-12-09T10:59:58,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 83174aa3cf1695218cd5c873b31078c9, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T10:59:58,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=85 2024-12-09T10:59:58,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure 83174aa3cf1695218cd5c873b31078c9, server=3469f9ca0af3,42349,1733741767108 in 206 msec 2024-12-09T10:59:58,078 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=83 2024-12-09T10:59:58,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=83174aa3cf1695218cd5c873b31078c9, UNASSIGN in 229 msec 2024-12-09T10:59:58,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-09T10:59:58,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 236 msec 2024-12-09T10:59:58,086 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741998086"}]},"ts":"1733741998086"} 2024-12-09T10:59:58,089 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-09T10:59:58,089 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-09T10:59:58,092 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 267 msec 2024-12-09T10:59:58,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-09T10:59:58,157 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-09T10:59:58,158 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete 
testExportWithResetTtl 2024-12-09T10:59:58,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-09T10:59:58,163 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-09T10:59:58,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-09T10:59:58,165 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-09T10:59:58,170 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-09T10:59:58,173 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:58,176 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/recovered.edits] 2024-12-09T10:59:58,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T10:59:58,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T10:59:58,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T10:59:58,181 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-09T10:59:58,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T10:59:58,182 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:58,184 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-09T10:59:58,185 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-09T10:59:58,186 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-12-09T10:59:58,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T10:59:58,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:58,186 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T10:59:58,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:58,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T10:59:58,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:58,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T10:59:58,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:58,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-09T10:59:58,189 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:58,189 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:58,192 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:58,192 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data 
PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:58,194 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/recovered.edits] 2024-12-09T10:59:58,198 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/cf/b9881f09ce274684aa2af3aeb1d3c715 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/cf/b9881f09ce274684aa2af3aeb1d3c715 2024-12-09T10:59:58,208 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/cf/b9dfdb4efc0d4f3d848d28382a8b73ca to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/cf/b9dfdb4efc0d4f3d848d28382a8b73ca 2024-12-09T10:59:58,209 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/recovered.edits/8.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4/recovered.edits/8.seqid 2024-12-09T10:59:58,210 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:58,217 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/recovered.edits/8.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9/recovered.edits/8.seqid 2024-12-09T10:59:58,218 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportWithResetTtl/83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:58,218 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-09T10:59:58,219 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-09T10:59:58,220 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-12-09T10:59:58,225 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120922d3bb9db6a548e6995c8dce3e5eb375_83174aa3cf1695218cd5c873b31078c9 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120922d3bb9db6a548e6995c8dce3e5eb375_83174aa3cf1695218cd5c873b31078c9 2024-12-09T10:59:58,227 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241209bf5afb3da91e4e9d936c30d6babd15e6_606725660a8158e7655d9103b2bb95e4 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241209bf5afb3da91e4e9d936c30d6babd15e6_606725660a8158e7655d9103b2bb95e4 2024-12-09T10:59:58,228 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-09T10:59:58,231 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-09T10:59:58,235 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-09T10:59:58,238 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-12-09T10:59:58,244 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-09T10:59:58,244 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 
2024-12-09T10:59:58,244 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733741998244"}]},"ts":"9223372036854775807"} 2024-12-09T10:59:58,244 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733741998244"}]},"ts":"9223372036854775807"} 2024-12-09T10:59:58,250 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T10:59:58,250 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 606725660a8158e7655d9103b2bb95e4, NAME => 'testExportWithResetTtl,,1733741963704.606725660a8158e7655d9103b2bb95e4.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 83174aa3cf1695218cd5c873b31078c9, NAME => 'testExportWithResetTtl,1,1733741963704.83174aa3cf1695218cd5c873b31078c9.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T10:59:58,251 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-12-09T10:59:58,251 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733741998251"}]},"ts":"9223372036854775807"} 2024-12-09T10:59:58,254 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-09T10:59:58,256 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-09T10:59:58,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 98 msec 2024-12-09T10:59:58,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-09T10:59:58,295 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-09T10:59:58,295 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-09T10:59:58,296 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-09T10:59:58,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T10:59:58,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-09T10:59:58,308 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741998307"}]},"ts":"1733741998307"} 2024-12-09T10:59:58,312 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 
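Note: the disable and delete of testExportWithResetTtl recorded above (DisableTableProcedure pid=82 through DeleteTableProcedure pid=88, including the HFile archiving and hbase:meta cleanup) are driven from the client by two Admin calls. A minimal sketch is below, assuming a default client configuration; the connection boilerplate and the class name DropTableSketch are illustrative and not taken from the test.

    // Hedged sketch, not the test's actual code: client-side calls that trigger the
    // disable-table and delete-table procedures seen in the master log above.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.disableTable(table); // blocks while the master runs DisableTableProcedure
          admin.deleteTable(table);  // blocks while regions are archived and meta rows removed
        }
      }
    }

Both calls return only after the corresponding master procedure finishes, which matches the repeated "Checking to see if procedure is done pid=..." polls in the log while the client waits.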
2024-12-09T10:59:58,312 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-09T10:59:58,315 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-09T10:59:58,322 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=245fc14e78adb90753336d94265cf7d5, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8b9ff6a0eb0adba92ba641ddcac9ba6, UNASSIGN}] 2024-12-09T10:59:58,326 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8b9ff6a0eb0adba92ba641ddcac9ba6, UNASSIGN 2024-12-09T10:59:58,327 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=245fc14e78adb90753336d94265cf7d5, UNASSIGN 2024-12-09T10:59:58,330 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=a8b9ff6a0eb0adba92ba641ddcac9ba6, regionState=CLOSING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T10:59:58,330 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=245fc14e78adb90753336d94265cf7d5, regionState=CLOSING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T10:59:58,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8b9ff6a0eb0adba92ba641ddcac9ba6, UNASSIGN because future has completed 2024-12-09T10:59:58,347 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T10:59:58,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T10:59:58,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=245fc14e78adb90753336d94265cf7d5, UNASSIGN because future has completed 2024-12-09T10:59:58,350 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T10:59:58,350 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 245fc14e78adb90753336d94265cf7d5, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T10:59:58,354 WARN [ContainersLauncher #2 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000004/launch_container.sh] 2024-12-09T10:59:58,355 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000004/container_tokens] 2024-12-09T10:59:58,355 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000004/sysfs] 2024-12-09T10:59:58,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-09T10:59:58,502 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:58,502 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T10:59:58,502 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing a8b9ff6a0eb0adba92ba641ddcac9ba6, disabling compactions & flushes 2024-12-09T10:59:58,502 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 2024-12-09T10:59:58,502 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 2024-12-09T10:59:58,502 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. after waiting 0 ms 2024-12-09T10:59:58,502 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 
2024-12-09T10:59:58,508 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T10:59:58,509 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:59:58,509 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6. 2024-12-09T10:59:58,509 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for a8b9ff6a0eb0adba92ba641ddcac9ba6: Waiting for close lock at 1733741998502Running coprocessor pre-close hooks at 1733741998502Disabling compacts and flushes for region at 1733741998502Disabling writes for close at 1733741998502Writing region close event to WAL at 1733741998503 (+1 ms)Running coprocessor post-close hooks at 1733741998508 (+5 ms)Closed at 1733741998509 (+1 ms) 2024-12-09T10:59:58,511 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:58,511 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=a8b9ff6a0eb0adba92ba641ddcac9ba6, regionState=CLOSED 2024-12-09T10:59:58,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T10:59:58,516 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-12-09T10:59:58,516 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure a8b9ff6a0eb0adba92ba641ddcac9ba6, server=3469f9ca0af3,33293,1733741767044 in 167 msec 2024-12-09T10:59:58,517 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a8b9ff6a0eb0adba92ba641ddcac9ba6, UNASSIGN in 194 msec 2024-12-09T10:59:58,536 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:58,536 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T10:59:58,536 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing 245fc14e78adb90753336d94265cf7d5, disabling compactions & flushes 2024-12-09T10:59:58,536 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 
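Note: the repeated "Checking to see if procedure is done pid=89" entries and the later "RawAsyncHBaseAdmin$TableProcedureBiConsumer ... completed" line show the async client waiting on a future that resolves only when the master reports the procedure finished. A hedged sketch of that flow with the asynchronous admin API; connection setup and the class name are illustrative assumptions, not taken from the test.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncDisableTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection asyncConn = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = asyncConn.getAdmin();
      TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
      // The returned future completes when the master's DisableTableProcedure reports done,
      // which is what produces the "Operation: DISABLE ... completed" log line.
      CompletableFuture<Void> disabled = admin.disableTable(tn);
      disabled.join(); // block here only for the sake of the example
    }
  }
}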
2024-12-09T10:59:58,536 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:58,536 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. after waiting 0 ms 2024-12-09T10:59:58,536 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:58,540 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T10:59:58,541 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T10:59:58,541 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5. 2024-12-09T10:59:58,541 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for 245fc14e78adb90753336d94265cf7d5: Waiting for close lock at 1733741998536Running coprocessor pre-close hooks at 1733741998536Disabling compacts and flushes for region at 1733741998536Disabling writes for close at 1733741998536Writing region close event to WAL at 1733741998537 (+1 ms)Running coprocessor post-close hooks at 1733741998541 (+4 ms)Closed at 1733741998541 2024-12-09T10:59:58,543 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed 245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:58,543 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=245fc14e78adb90753336d94265cf7d5, regionState=CLOSED 2024-12-09T10:59:58,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 245fc14e78adb90753336d94265cf7d5, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T10:59:58,548 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-12-09T10:59:58,548 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure 245fc14e78adb90753336d94265cf7d5, server=3469f9ca0af3,39691,1733741766880 in 196 msec 2024-12-09T10:59:58,550 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=90 2024-12-09T10:59:58,550 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, 
region=245fc14e78adb90753336d94265cf7d5, UNASSIGN in 226 msec 2024-12-09T10:59:58,552 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-09T10:59:58,552 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 235 msec 2024-12-09T10:59:58,553 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741998553"}]},"ts":"1733741998553"} 2024-12-09T10:59:58,555 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-09T10:59:58,555 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-09T10:59:58,557 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 260 msec 2024-12-09T10:59:58,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-09T10:59:58,623 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-09T10:59:58,623 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-09T10:59:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T10:59:58,632 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T10:59:58,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-09T10:59:58,633 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T10:59:58,638 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:58,641 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/recovered.edits] 2024-12-09T10:59:58,643 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-09T10:59:58,644 
DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:58,647 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/recovered.edits] 2024-12-09T10:59:58,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T10:59:58,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T10:59:58,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T10:59:58,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T10:59:58,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-09T10:59:58,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-09T10:59:58,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-09T10:59:58,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-09T10:59:58,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T10:59:58,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:58,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T10:59:58,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:58,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T10:59:58,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:58,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T10:59:58,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:58,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-09T10:59:58,654 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/cf/0ae01c40872245f980888a7a560c3d9f to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/cf/0ae01c40872245f980888a7a560c3d9f 2024-12-09T10:59:58,656 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/cf/6421bf6d0a8049d89c04e2a01dabc206 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/cf/6421bf6d0a8049d89c04e2a01dabc206 2024-12-09T10:59:58,659 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6/recovered.edits/9.seqid 2024-12-09T10:59:58,660 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:58,662 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5/recovered.edits/9.seqid 2024-12-09T10:59:58,662 DEBUG [HFileArchiver-10 {}] 
backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithResetTtl/245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:58,663 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-09T10:59:58,663 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-09T10:59:58,664 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-12-09T10:59:58,670 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241209d59b334b57fb47459e229939fd580efb_a8b9ff6a0eb0adba92ba641ddcac9ba6 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241209d59b334b57fb47459e229939fd580efb_a8b9ff6a0eb0adba92ba641ddcac9ba6 2024-12-09T10:59:58,672 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241209fa3c30be7bab408fb1b7431fb1622fea_245fc14e78adb90753336d94265cf7d5 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241209fa3c30be7bab408fb1b7431fb1622fea_245fc14e78adb90753336d94265cf7d5 2024-12-09T10:59:58,673 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-09T10:59:58,677 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T10:59:58,681 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-09T10:59:58,684 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-09T10:59:58,685 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T10:59:58,685 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 
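Note: the HFileArchiver entries above do not delete the region and MOB directories outright; they move them from the table's data directory to the matching location under archive/. A small illustration of that path mapping using plain Hadoop Path operations, mirroring the layout visible in the log; this is only a sketch of the directory convention, not the archiver's actual code.

import org.apache.hadoop.fs.Path;

public class ArchivePathLayout {
  public static void main(String[] args) {
    // Root directory as it appears in the log entries above.
    Path root = new Path("hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6");
    String table = "testtb-testExportWithResetTtl";
    String region = "245fc14e78adb90753336d94265cf7d5";

    // Live layout:    <root>/data/default/<table>/<region>/...
    // Archive layout: <root>/archive/data/default/<table>/<region>/...
    Path live = new Path(root, "data/default/" + table + "/" + region);
    Path archived = new Path(root, "archive/data/default/" + table + "/" + region);

    System.out.println("archiving " + live + " -> " + archived);
  }
}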
2024-12-09T10:59:58,686 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733741998685"}]},"ts":"9223372036854775807"} 2024-12-09T10:59:58,686 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733741998685"}]},"ts":"9223372036854775807"} 2024-12-09T10:59:58,688 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T10:59:58,688 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 245fc14e78adb90753336d94265cf7d5, NAME => 'testtb-testExportWithResetTtl,,1733741960507.245fc14e78adb90753336d94265cf7d5.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a8b9ff6a0eb0adba92ba641ddcac9ba6, NAME => 'testtb-testExportWithResetTtl,1,1733741960507.a8b9ff6a0eb0adba92ba641ddcac9ba6.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T10:59:58,688 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-09T10:59:58,688 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733741998688"}]},"ts":"9223372036854775807"} 2024-12-09T10:59:58,690 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-09T10:59:58,691 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T10:59:58,693 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 67 msec 2024-12-09T10:59:58,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-09T10:59:58,762 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-09T10:59:58,762 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-09T10:59:58,771 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-09T10:59:58,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-09T10:59:58,774 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-09T10:59:58,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-09T10:59:58,777 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: 
"snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-09T10:59:58,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-09T10:59:58,802 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=793 (was 782) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46849 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:34308 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1359318497) connection to localhost/127.0.0.1:46849 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2069471212_1 at /127.0.0.1:60532 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:43640 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:49238 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 5579) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2069471212_1 at /127.0.0.1:49222 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3170 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=801 (was 801), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1269 (was 1178) - SystemLoadAverage LEAK? 
-, ProcessCount=14 (was 14), AvailableMemoryMB=3489 (was 3810) 2024-12-09T10:59:58,802 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-12-09T10:59:58,821 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=793, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=1269, ProcessCount=14, AvailableMemoryMB=3488 2024-12-09T10:59:58,821 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-12-09T10:59:58,822 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T10:59:58,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-09T10:59:58,824 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T10:59:58,825 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-09T10:59:58,825 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T10:59:58,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-09T10:59:58,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742037_1213 (size=443) 2024-12-09T10:59:58,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742037_1213 (size=443) 2024-12-09T10:59:58,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742037_1213 (size=443) 2024-12-09T10:59:58,837 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e2e56bbf9999b467c03e19ef66dd5b5c, NAME => 'testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:59:58,837 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => cf51409f107b64b6053b05186b328bf2, NAME => 'testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T10:59:58,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742038_1214 (size=68) 2024-12-09T10:59:58,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742038_1214 (size=68) 2024-12-09T10:59:58,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742038_1214 (size=68) 2024-12-09T10:59:58,850 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:59:58,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing e2e56bbf9999b467c03e19ef66dd5b5c, disabling compactions & flushes 2024-12-09T10:59:58,851 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T10:59:58,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T10:59:58,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. after waiting 0 ms 2024-12-09T10:59:58,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T10:59:58,851 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 
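Note: the create request above (pid=96) builds testtb-testExportFileSystemState from the printed descriptor: a single 'cf' family with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', and a split at '1' yielding the two regions (''..'1') and ('1'..''). A hedged client-side sketch that would request an equivalent table; the class name and the local configuration are assumptions, not the test's code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell is written as a MOB file
              .setMaxVersions(1)     // VERSIONS => '1'
              .build())
          .build();
      // One split key gives the two regions seen in the log: (''..'1') and ('1'..'').
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(desc, splits);
    }
  }
}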
2024-12-09T10:59:58,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for e2e56bbf9999b467c03e19ef66dd5b5c: Waiting for close lock at 1733741998851Disabling compacts and flushes for region at 1733741998851Disabling writes for close at 1733741998851Writing region close event to WAL at 1733741998851Closed at 1733741998851 2024-12-09T10:59:58,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742039_1215 (size=68) 2024-12-09T10:59:58,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742039_1215 (size=68) 2024-12-09T10:59:58,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742039_1215 (size=68) 2024-12-09T10:59:58,853 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:59:58,853 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing cf51409f107b64b6053b05186b328bf2, disabling compactions & flushes 2024-12-09T10:59:58,853 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 2024-12-09T10:59:58,853 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 2024-12-09T10:59:58,853 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. after waiting 0 ms 2024-12-09T10:59:58,853 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 2024-12-09T10:59:58,853 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 
2024-12-09T10:59:58,853 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for cf51409f107b64b6053b05186b328bf2: Waiting for close lock at 1733741998853Disabling compacts and flushes for region at 1733741998853Disabling writes for close at 1733741998853Writing region close event to WAL at 1733741998853Closed at 1733741998853 2024-12-09T10:59:58,854 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T10:59:58,855 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733741998854"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741998854"}]},"ts":"1733741998854"} 2024-12-09T10:59:58,855 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733741998854"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733741998854"}]},"ts":"1733741998854"} 2024-12-09T10:59:58,858 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T10:59:58,859 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T10:59:58,859 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741998859"}]},"ts":"1733741998859"} 2024-12-09T10:59:58,861 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-09T10:59:58,861 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T10:59:58,862 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T10:59:58,862 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T10:59:58,862 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T10:59:58,862 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T10:59:58,862 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T10:59:58,862 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T10:59:58,862 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T10:59:58,862 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T10:59:58,862 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T10:59:58,862 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T10:59:58,863 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2e56bbf9999b467c03e19ef66dd5b5c, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=cf51409f107b64b6053b05186b328bf2, ASSIGN}] 2024-12-09T10:59:58,864 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=cf51409f107b64b6053b05186b328bf2, ASSIGN 2024-12-09T10:59:58,864 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2e56bbf9999b467c03e19ef66dd5b5c, ASSIGN 2024-12-09T10:59:58,865 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=cf51409f107b64b6053b05186b328bf2, ASSIGN; state=OFFLINE, location=3469f9ca0af3,39691,1733741766880; forceNewPlan=false, retain=false 2024-12-09T10:59:58,865 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2e56bbf9999b467c03e19ef66dd5b5c, ASSIGN; state=OFFLINE, location=3469f9ca0af3,42349,1733741767108; forceNewPlan=false, retain=false 2024-12-09T10:59:58,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-09T10:59:59,015 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-09T10:59:59,016 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=cf51409f107b64b6053b05186b328bf2, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T10:59:59,016 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=e2e56bbf9999b467c03e19ef66dd5b5c, regionState=OPENING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T10:59:59,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=cf51409f107b64b6053b05186b328bf2, ASSIGN because future has completed 2024-12-09T10:59:59,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure cf51409f107b64b6053b05186b328bf2, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T10:59:59,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2e56bbf9999b467c03e19ef66dd5b5c, ASSIGN because future has completed 2024-12-09T10:59:59,020 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T10:59:59,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-09T10:59:59,174 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 2024-12-09T10:59:59,175 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => cf51409f107b64b6053b05186b328bf2, NAME => 'testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T10:59:59,175 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. service=AccessControlService 2024-12-09T10:59:59,175 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T10:59:59,176 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,176 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:59:59,176 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,176 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,176 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T10:59:59,176 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => e2e56bbf9999b467c03e19ef66dd5b5c, NAME => 'testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T10:59:59,176 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. service=AccessControlService 2024-12-09T10:59:59,177 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T10:59:59,177 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,177 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T10:59:59,177 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,177 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,177 INFO [StoreOpener-cf51409f107b64b6053b05186b328bf2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,178 INFO [StoreOpener-e2e56bbf9999b467c03e19ef66dd5b5c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,179 INFO [StoreOpener-cf51409f107b64b6053b05186b328bf2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cf51409f107b64b6053b05186b328bf2 columnFamilyName cf 2024-12-09T10:59:59,179 INFO [StoreOpener-e2e56bbf9999b467c03e19ef66dd5b5c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2e56bbf9999b467c03e19ef66dd5b5c columnFamilyName cf 2024-12-09T10:59:59,180 DEBUG [StoreOpener-cf51409f107b64b6053b05186b328bf2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:59,180 DEBUG [StoreOpener-e2e56bbf9999b467c03e19ef66dd5b5c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:59,181 INFO [StoreOpener-e2e56bbf9999b467c03e19ef66dd5b5c-1 {}] regionserver.HStore(327): Store=e2e56bbf9999b467c03e19ef66dd5b5c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:59:59,181 INFO [StoreOpener-cf51409f107b64b6053b05186b328bf2-1 {}] regionserver.HStore(327): Store=cf51409f107b64b6053b05186b328bf2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T10:59:59,181 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,181 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,182 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,182 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,182 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,182 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,182 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,182 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,182 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,183 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,184 DEBUG 
[RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,184 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,187 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T10:59:59,187 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T10:59:59,187 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened e2e56bbf9999b467c03e19ef66dd5b5c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62967266, jitterRate=-0.061714619398117065}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T10:59:59,187 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,188 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened cf51409f107b64b6053b05186b328bf2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61131705, jitterRate=-0.08906660974025726}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T10:59:59,188 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,188 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for cf51409f107b64b6053b05186b328bf2: Running coprocessor pre-open hook at 1733741999176Writing region info on filesystem at 1733741999176Initializing all the Stores at 1733741999177 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741999177Cleaning up temporary data from old regions at 1733741999183 (+6 ms)Running coprocessor post-open hooks at 1733741999188 (+5 ms)Region opened successfully at 1733741999188 2024-12-09T10:59:59,188 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for e2e56bbf9999b467c03e19ef66dd5b5c: Running coprocessor pre-open hook 
at 1733741999177Writing region info on filesystem at 1733741999177Initializing all the Stores at 1733741999178 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733741999178Cleaning up temporary data from old regions at 1733741999183 (+5 ms)Running coprocessor post-open hooks at 1733741999187 (+4 ms)Region opened successfully at 1733741999188 (+1 ms) 2024-12-09T10:59:59,189 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c., pid=100, masterSystemTime=1733741999173 2024-12-09T10:59:59,189 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2., pid=99, masterSystemTime=1733741999171 2024-12-09T10:59:59,191 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T10:59:59,191 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T10:59:59,192 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=e2e56bbf9999b467c03e19ef66dd5b5c, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T10:59:59,192 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 2024-12-09T10:59:59,192 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 
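For reference, the create-table activity logged above (pid=96) yields a table with a single column family cf (VERSIONS => '1', IS_MOB => 'true', MOB_THRESHOLD => '0' per the region-open journals), pre-split into two regions at row key '1'. A minimal client-side sketch that would request the same layout follows; only the table name, family name, family attributes, and split key come from the log, while the connection setup, class name, and variable names are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTestTable {   // illustrative class name, not from the log
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemState");
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)     // VERSIONS => '1' in the logged descriptor
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(0L)   // MOB_THRESHOLD => '0'
              .build());
      // One split key yields the two regions seen above: ['', '1') and ['1', '').
      admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}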
2024-12-09T10:59:59,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T10:59:59,194 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=cf51409f107b64b6053b05186b328bf2, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T10:59:59,196 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure cf51409f107b64b6053b05186b328bf2, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T10:59:59,196 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=3469f9ca0af3,42349,1733741767108, table=testtb-testExportFileSystemState, region=e2e56bbf9999b467c03e19ef66dd5b5c. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-09T10:59:59,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-12-09T10:59:59,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure cf51409f107b64b6053b05186b328bf2, server=3469f9ca0af3,39691,1733741766880 in 187 msec 2024-12-09T10:59:59,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-12-09T10:59:59,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c, server=3469f9ca0af3,42349,1733741767108 in 185 msec 2024-12-09T10:59:59,210 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=cf51409f107b64b6053b05186b328bf2, ASSIGN in 345 msec 2024-12-09T10:59:59,211 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-12-09T10:59:59,211 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2e56bbf9999b467c03e19ef66dd5b5c, ASSIGN in 346 msec 2024-12-09T10:59:59,211 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T10:59:59,212 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733741999211"}]},"ts":"1733741999211"} 2024-12-09T10:59:59,213 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-09T10:59:59,214 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T10:59:59,214 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-09T10:59:59,217 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-09T10:59:59,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:59,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:59,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:59,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T10:59:59,222 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:59,222 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:59,222 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:59,222 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T10:59:59,224 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 400 msec 2024-12-09T10:59:59,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-09T10:59:59,452 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-09T10:59:59,453 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T10:59:59,456 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 
regions for table testtb-testExportFileSystemState 2024-12-09T10:59:59,456 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T10:59:59,457 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T10:59:59,459 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T10:59:59,465 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T10:59:59,471 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T10:59:59,474 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T10:59:59,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733741999474 (current time:1733741999474). 2024-12-09T10:59:59,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T10:59:59,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-09T10:59:59,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T10:59:59,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@199331b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:59,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:59:59,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:59:59,476 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:59:59,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:59:59,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:59:59,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@584a4bb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:59,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:59:59,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:59:59,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:59,477 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51514, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:59:59,478 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bd1bc66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:59,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:59:59,479 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:59:59,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:59:59,480 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49346, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:59:59,482 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T10:59:59,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:59:59,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:59,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:59,482 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:59:59,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9aedb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:59,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:59:59,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:59:59,483 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:59:59,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:59:59,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:59:59,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a736996, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:59,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:59:59,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:59:59,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:59,485 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:59:59,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@129bcc48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:59,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:59:59,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:59:59,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:59:59,489 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49352, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:59:59,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T10:59:59,493 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T10:59:59,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:59:59,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:59,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:59,493 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:59:59,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-09T10:59:59,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
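The snapshot request being validated above ({ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }) is what the master sees when a client asks for a flush-type snapshot of an enabled table. A hedged client-side sketch follows; the snapshot and table names are taken from the log, and everything else (connection handling, class and variable names) is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {   // illustrative class name, not from the log
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // FLUSH is the default snapshot type for an enabled table, matching the logged request.
      admin.snapshot("emptySnaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"));
    }
  }
}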
2024-12-09T10:59:59,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T10:59:59,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-09T10:59:59,496 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T10:59:59,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-09T10:59:59,497 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T10:59:59,500 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T10:59:59,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742040_1216 (size=170) 2024-12-09T10:59:59,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742040_1216 (size=170) 2024-12-09T10:59:59,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742040_1216 (size=170) 2024-12-09T10:59:59,509 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T10:59:59,509 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf51409f107b64b6053b05186b328bf2}] 2024-12-09T10:59:59,511 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,511 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,602 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-09T10:59:59,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-09T10:59:59,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-09T10:59:59,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T10:59:59,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for e2e56bbf9999b467c03e19ef66dd5b5c: 2024-12-09T10:59:59,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. for emptySnaptb0-testExportFileSystemState completed. 2024-12-09T10:59:59,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-09T10:59:59,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:59:59,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T10:59:59,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 2024-12-09T10:59:59,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for cf51409f107b64b6053b05186b328bf2: 2024-12-09T10:59:59,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. for emptySnaptb0-testExportFileSystemState completed. 2024-12-09T10:59:59,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-09T10:59:59,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T10:59:59,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T10:59:59,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742041_1217 (size=71) 2024-12-09T10:59:59,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742041_1217 (size=71) 2024-12-09T10:59:59,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742041_1217 (size=71) 2024-12-09T10:59:59,677 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T10:59:59,677 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-09T10:59:59,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-09T10:59:59,678 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,678 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,683 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c in 173 msec 2024-12-09T10:59:59,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742042_1218 (size=71) 2024-12-09T10:59:59,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742042_1218 (size=71) 2024-12-09T10:59:59,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742042_1218 (size=71) 2024-12-09T10:59:59,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 
2024-12-09T10:59:59,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-09T10:59:59,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-09T10:59:59,689 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,689 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=101 2024-12-09T10:59:59,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cf51409f107b64b6053b05186b328bf2 in 181 msec 2024-12-09T10:59:59,692 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T10:59:59,693 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T10:59:59,694 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T10:59:59,694 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T10:59:59,694 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T10:59:59,695 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-09T10:59:59,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742043_1219 (size=63) 2024-12-09T10:59:59,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742043_1219 (size=63) 2024-12-09T10:59:59,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742043_1219 (size=63) 2024-12-09T10:59:59,706 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T10:59:59,706 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-09T10:59:59,707 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-09T10:59:59,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742044_1220 (size=653) 2024-12-09T10:59:59,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742044_1220 (size=653) 2024-12-09T10:59:59,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742044_1220 (size=653) 2024-12-09T10:59:59,728 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T10:59:59,734 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T10:59:59,734 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-09T10:59:59,736 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T10:59:59,736 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-09T10:59:59,738 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 242 msec 2024-12-09T10:59:59,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-09T10:59:59,812 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-09T10:59:59,820 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42349 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T10:59:59,822 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39691 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T10:59:59,824 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T10:59:59,826 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-09T10:59:59,826 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 
2024-12-09T10:59:59,827 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T10:59:59,829 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T10:59:59,835 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T10:59:59,840 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T10:59:59,843 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T10:59:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733741999843 (current time:1733741999843). 2024-12-09T10:59:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T10:59:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-09T10:59:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T10:59:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57ff68b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:59:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:59:59,845 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:59:59,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:59:59,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:59:59,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2871218, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-09T10:59:59,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:59:59,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:59:59,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:59,846 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51556, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:59:59,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7c9ee7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:59:59,848 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:59:59,848 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:59:59,849 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49364, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:59:59,850 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T10:59:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:59:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:59,850 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:59:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@246a9b28, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T10:59:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T10:59:59,852 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T10:59:59,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T10:59:59,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T10:59:59,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56e07338, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:59,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T10:59:59,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T10:59:59,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:59,853 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51568, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T10:59:59,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527fce2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T10:59:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T10:59:59,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T10:59:59,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T10:59:59,856 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49368, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T10:59:59,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T10:59:59,859 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T10:59:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T10:59:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T10:59:59,859 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T10:59:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-09T10:59:59,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-09T10:59:59,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T10:59:59,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-09T10:59:59,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-09T10:59:59,866 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T10:59:59,867 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T10:59:59,870 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T10:59:59,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742045_1221 (size=165) 2024-12-09T10:59:59,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742045_1221 (size=165) 2024-12-09T10:59:59,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742045_1221 (size=165) 2024-12-09T10:59:59,879 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T10:59:59,879 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf51409f107b64b6053b05186b328bf2}] 2024-12-09T10:59:59,880 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T10:59:59,880 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf51409f107b64b6053b05186b328bf2 2024-12-09T10:59:59,973 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-09T11:00:00,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-09T11:00:00,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-09T11:00:00,032 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T11:00:00,032 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 2024-12-09T11:00:00,033 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing e2e56bbf9999b467c03e19ef66dd5b5c 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-09T11:00:00,033 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing cf51409f107b64b6053b05186b328bf2 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-09T11:00:00,053 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120924918517d06c4d9a94161c0c92267a82_cf51409f107b64b6053b05186b328bf2 is 71, key is 10388d2eccbb9c7fe70451b6cd262160/cf:q/1733741999822/Put/seqid=0 2024-12-09T11:00:00,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209730debba470f42cdbdbb3fb5f28284d4_e2e56bbf9999b467c03e19ef66dd5b5c is 71, key is 0426cffe13f57112c39fc51bcf6e202b/cf:q/1733741999819/Put/seqid=0 2024-12-09T11:00:00,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742046_1222 (size=8171) 2024-12-09T11:00:00,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742046_1222 (size=8171) 2024-12-09T11:00:00,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742046_1222 (size=8171) 2024-12-09T11:00:00,076 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:00:00,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to 
blk_1073742047_1223 (size=5101) 2024-12-09T11:00:00,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742047_1223 (size=5101) 2024-12-09T11:00:00,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742047_1223 (size=5101) 2024-12-09T11:00:00,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:00:00,090 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120924918517d06c4d9a94161c0c92267a82_cf51409f107b64b6053b05186b328bf2 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024120924918517d06c4d9a94161c0c92267a82_cf51409f107b64b6053b05186b328bf2 2024-12-09T11:00:00,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/.tmp/cf/bae0ff9b21e743f28ff34ed55b8b1670, store: [table=testtb-testExportFileSystemState family=cf region=cf51409f107b64b6053b05186b328bf2] 2024-12-09T11:00:00,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/.tmp/cf/bae0ff9b21e743f28ff34ed55b8b1670 is 209, key is 170715d4d6f9defa698b6ee600e98d046/cf:q/1733741999822/Put/seqid=0 2024-12-09T11:00:00,093 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209730debba470f42cdbdbb3fb5f28284d4_e2e56bbf9999b467c03e19ef66dd5b5c to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241209730debba470f42cdbdbb3fb5f28284d4_e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T11:00:00,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/.tmp/cf/b5a3cb5ff3b94b04817978be5a728f96, store: [table=testtb-testExportFileSystemState family=cf region=e2e56bbf9999b467c03e19ef66dd5b5c] 2024-12-09T11:00:00,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, 
pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/.tmp/cf/b5a3cb5ff3b94b04817978be5a728f96 is 209, key is 059ecc462ac27c44eef93bc6b753010fd/cf:q/1733741999819/Put/seqid=0 2024-12-09T11:00:00,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742048_1224 (size=14997) 2024-12-09T11:00:00,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742048_1224 (size=14997) 2024-12-09T11:00:00,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742048_1224 (size=14997) 2024-12-09T11:00:00,102 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/.tmp/cf/bae0ff9b21e743f28ff34ed55b8b1670 2024-12-09T11:00:00,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742049_1225 (size=5916) 2024-12-09T11:00:00,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742049_1225 (size=5916) 2024-12-09T11:00:00,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742049_1225 (size=5916) 2024-12-09T11:00:00,105 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/.tmp/cf/b5a3cb5ff3b94b04817978be5a728f96 2024-12-09T11:00:00,108 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/.tmp/cf/bae0ff9b21e743f28ff34ed55b8b1670 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/cf/bae0ff9b21e743f28ff34ed55b8b1670 2024-12-09T11:00:00,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/.tmp/cf/b5a3cb5ff3b94b04817978be5a728f96 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/cf/b5a3cb5ff3b94b04817978be5a728f96 2024-12-09T11:00:00,113 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/cf/bae0ff9b21e743f28ff34ed55b8b1670, entries=47, sequenceid=6, filesize=14.6 K 2024-12-09T11:00:00,114 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for cf51409f107b64b6053b05186b328bf2 in 81ms, sequenceid=6, compaction requested=false 2024-12-09T11:00:00,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-09T11:00:00,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for cf51409f107b64b6053b05186b328bf2: 2024-12-09T11:00:00,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. for snaptb0-testExportFileSystemState completed. 2024-12-09T11:00:00,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-09T11:00:00,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:00:00,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/cf/bae0ff9b21e743f28ff34ed55b8b1670] hfiles 2024-12-09T11:00:00,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/cf/bae0ff9b21e743f28ff34ed55b8b1670 for snapshot=snaptb0-testExportFileSystemState 2024-12-09T11:00:00,117 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/cf/b5a3cb5ff3b94b04817978be5a728f96, entries=3, sequenceid=6, filesize=5.8 K 2024-12-09T11:00:00,128 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for e2e56bbf9999b467c03e19ef66dd5b5c in 95ms, sequenceid=6, compaction requested=false 2024-12-09T11:00:00,128 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for e2e56bbf9999b467c03e19ef66dd5b5c: 2024-12-09T11:00:00,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. for snaptb0-testExportFileSystemState completed. 2024-12-09T11:00:00,129 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-09T11:00:00,129 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:00:00,129 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/cf/b5a3cb5ff3b94b04817978be5a728f96] hfiles 2024-12-09T11:00:00,129 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/cf/b5a3cb5ff3b94b04817978be5a728f96 for snapshot=snaptb0-testExportFileSystemState 2024-12-09T11:00:00,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742050_1226 (size=110) 2024-12-09T11:00:00,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742050_1226 (size=110) 2024-12-09T11:00:00,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742050_1226 (size=110) 2024-12-09T11:00:00,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 
2024-12-09T11:00:00,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-09T11:00:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-09T11:00:00,133 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region cf51409f107b64b6053b05186b328bf2 2024-12-09T11:00:00,133 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf51409f107b64b6053b05186b328bf2 2024-12-09T11:00:00,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742051_1227 (size=110) 2024-12-09T11:00:00,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742051_1227 (size=110) 2024-12-09T11:00:00,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742051_1227 (size=110) 2024-12-09T11:00:00,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T11:00:00,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-09T11:00:00,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-09T11:00:00,144 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T11:00:00,148 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T11:00:00,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cf51409f107b64b6053b05186b328bf2 in 264 msec 2024-12-09T11:00:00,152 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=105, resume processing ppid=104 2024-12-09T11:00:00,152 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c in 271 msec 2024-12-09T11:00:00,153 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:00:00,157 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ 
ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:00:00,159 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-09T11:00:00,159 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:00:00,159 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:00:00,161 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024120924918517d06c4d9a94161c0c92267a82_cf51409f107b64b6053b05186b328bf2, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241209730debba470f42cdbdbb3fb5f28284d4_e2e56bbf9999b467c03e19ef66dd5b5c] hfiles 2024-12-09T11:00:00,161 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024120924918517d06c4d9a94161c0c92267a82_cf51409f107b64b6053b05186b328bf2 2024-12-09T11:00:00,161 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241209730debba470f42cdbdbb3fb5f28284d4_e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T11:00:00,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742052_1228 (size=294) 2024-12-09T11:00:00,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742052_1228 (size=294) 2024-12-09T11:00:00,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742052_1228 (size=294) 2024-12-09T11:00:00,173 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:00:00,173 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-09T11:00:00,174 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-09T11:00:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to 
see if procedure is done pid=104 2024-12-09T11:00:00,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742053_1229 (size=963) 2024-12-09T11:00:00,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742053_1229 (size=963) 2024-12-09T11:00:00,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742053_1229 (size=963) 2024-12-09T11:00:00,201 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:00:00,216 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:00:00,216 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-09T11:00:00,217 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:00:00,218 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-09T11:00:00,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 358 msec 2024-12-09T11:00:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-09T11:00:00,493 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-09T11:00:00,493 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742000493 2024-12-09T11:00:00,493 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35869, tgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742000493, 
rawTgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742000493, srcFsUri=hdfs://localhost:35869, srcDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:00:00,556 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35869, inputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:00:00,556 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742000493, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742000493/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-09T11:00:00,559 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T11:00:00,568 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742000493/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-09T11:00:00,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742054_1230 (size=165) 2024-12-09T11:00:00,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742054_1230 (size=165) 2024-12-09T11:00:00,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742054_1230 (size=165) 2024-12-09T11:00:00,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742055_1231 (size=963) 2024-12-09T11:00:00,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742055_1231 (size=963) 2024-12-09T11:00:00,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742055_1231 (size=963) 2024-12-09T11:00:00,610 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:00,611 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:00,611 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:00,807 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete 
returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_0/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000003/launch_container.sh] 2024-12-09T11:00:00,807 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_0/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000003/container_tokens] 2024-12-09T11:00:00,807 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_0/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000003/sysfs] 2024-12-09T11:00:01,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-4392685814413466321.jar 2024-12-09T11:00:01,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:01,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:01,933 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-162873786408186970.jar 2024-12-09T11:00:01,934 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:01,934 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:01,935 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:01,935 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:01,935 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:01,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:01,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T11:00:01,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T11:00:01,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T11:00:01,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T11:00:01,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T11:00:01,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T11:00:01,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T11:00:01,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T11:00:01,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T11:00:01,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T11:00:01,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T11:00:01,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:00:01,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:00:01,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:00:01,941 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:00:01,941 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:00:01,942 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:00:01,942 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:00:02,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742056_1232 (size=24020) 2024-12-09T11:00:02,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742056_1232 (size=24020) 2024-12-09T11:00:02,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742056_1232 (size=24020) 2024-12-09T11:00:02,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742057_1233 (size=77755) 2024-12-09T11:00:02,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742057_1233 (size=77755) 2024-12-09T11:00:02,094 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742057_1233 (size=77755) 2024-12-09T11:00:02,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742058_1234 (size=131360) 2024-12-09T11:00:02,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742058_1234 (size=131360) 2024-12-09T11:00:02,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742058_1234 (size=131360) 2024-12-09T11:00:02,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742059_1235 (size=111793) 2024-12-09T11:00:02,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742059_1235 (size=111793) 2024-12-09T11:00:02,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742059_1235 (size=111793) 2024-12-09T11:00:02,233 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0003_000001 (auth:SIMPLE) from 127.0.0.1:51770 2024-12-09T11:00:02,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742060_1236 (size=1832290) 2024-12-09T11:00:02,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742060_1236 (size=1832290) 2024-12-09T11:00:02,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742060_1236 (size=1832290) 2024-12-09T11:00:02,272 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000001/launch_container.sh] 2024-12-09T11:00:02,272 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000001/container_tokens] 2024-12-09T11:00:02,273 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0003/container_1733741775522_0003_01_000001/sysfs] 2024-12-09T11:00:02,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742061_1237 (size=8360282) 2024-12-09T11:00:02,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742061_1237 (size=8360282) 
2024-12-09T11:00:02,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742061_1237 (size=8360282) 2024-12-09T11:00:02,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742062_1238 (size=503880) 2024-12-09T11:00:02,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742062_1238 (size=503880) 2024-12-09T11:00:02,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742062_1238 (size=503880) 2024-12-09T11:00:02,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742063_1239 (size=322274) 2024-12-09T11:00:02,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742063_1239 (size=322274) 2024-12-09T11:00:02,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742063_1239 (size=322274) 2024-12-09T11:00:02,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742064_1240 (size=20406) 2024-12-09T11:00:02,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742064_1240 (size=20406) 2024-12-09T11:00:02,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742064_1240 (size=20406) 2024-12-09T11:00:02,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742065_1241 (size=6425021) 2024-12-09T11:00:02,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742065_1241 (size=6425021) 2024-12-09T11:00:02,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742065_1241 (size=6425021) 2024-12-09T11:00:02,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742066_1242 (size=45609) 2024-12-09T11:00:02,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742066_1242 (size=45609) 2024-12-09T11:00:02,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742066_1242 (size=45609) 2024-12-09T11:00:02,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742067_1243 (size=136454) 2024-12-09T11:00:02,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742067_1243 (size=136454) 2024-12-09T11:00:02,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742067_1243 (size=136454) 2024-12-09T11:00:02,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742068_1244 
(size=1597136) 2024-12-09T11:00:02,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742068_1244 (size=1597136) 2024-12-09T11:00:03,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742068_1244 (size=1597136) 2024-12-09T11:00:03,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742069_1245 (size=30873) 2024-12-09T11:00:03,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742069_1245 (size=30873) 2024-12-09T11:00:03,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742069_1245 (size=30873) 2024-12-09T11:00:03,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742070_1246 (size=29229) 2024-12-09T11:00:03,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742070_1246 (size=29229) 2024-12-09T11:00:03,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742070_1246 (size=29229) 2024-12-09T11:00:03,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742071_1247 (size=903861) 2024-12-09T11:00:03,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742071_1247 (size=903861) 2024-12-09T11:00:03,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742071_1247 (size=903861) 2024-12-09T11:00:03,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742072_1248 (size=5175431) 2024-12-09T11:00:03,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742072_1248 (size=5175431) 2024-12-09T11:00:03,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742072_1248 (size=5175431) 2024-12-09T11:00:03,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742073_1249 (size=232881) 2024-12-09T11:00:03,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742073_1249 (size=232881) 2024-12-09T11:00:03,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742073_1249 (size=232881) 2024-12-09T11:00:03,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742074_1250 (size=1323991) 2024-12-09T11:00:03,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742074_1250 (size=1323991) 2024-12-09T11:00:03,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to 
blk_1073742074_1250 (size=1323991) 2024-12-09T11:00:03,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742075_1251 (size=4695811) 2024-12-09T11:00:03,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742075_1251 (size=4695811) 2024-12-09T11:00:03,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742075_1251 (size=4695811) 2024-12-09T11:00:03,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742076_1252 (size=1877034) 2024-12-09T11:00:03,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742076_1252 (size=1877034) 2024-12-09T11:00:03,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742076_1252 (size=1877034) 2024-12-09T11:00:03,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742077_1253 (size=217555) 2024-12-09T11:00:03,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742077_1253 (size=217555) 2024-12-09T11:00:03,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742077_1253 (size=217555) 2024-12-09T11:00:03,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742078_1254 (size=4188619) 2024-12-09T11:00:03,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742078_1254 (size=4188619) 2024-12-09T11:00:03,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742078_1254 (size=4188619) 2024-12-09T11:00:03,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742079_1255 (size=127628) 2024-12-09T11:00:03,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742079_1255 (size=127628) 2024-12-09T11:00:03,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742079_1255 (size=127628) 2024-12-09T11:00:03,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742080_1256 (size=443171) 2024-12-09T11:00:03,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742080_1256 (size=443171) 2024-12-09T11:00:03,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742080_1256 (size=443171) 2024-12-09T11:00:03,354 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
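The TableMapReduceUtil(972) DEBUG entries above record how HBase resolves a containing jar for every class the export job depends on and ships those jars with the job, while the JobResourceUploader(481) warning only notes that this particular test job never set a job jar of its own. A minimal sketch of the kind of client-side job setup that produces this sort of output follows; the mapper class and scan are illustrative placeholders, not code taken from this test run.

    // Sketch only: wiring an HBase table scan into a MapReduce job.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.mapreduce.Job;

    public class ExampleTableJobSetup {
      // Placeholder mapper; a real job would override map().
      static class ExampleMapper extends TableMapper<ImmutableBytesWritable, Result> {
      }

      public static Job configure(Configuration base) throws Exception {
        Job job = Job.getInstance(HBaseConfiguration.create(base), "example-table-scan");
        // Setting the job jar explicitly avoids the "No job jar file set" warning above.
        job.setJarByClass(ExampleTableJobSetup.class);
        // initTableMapperJob adds the HBase dependency jars to the job; resolving a jar
        // for each required class is what the "For class X, using jar Y" lines record.
        TableMapReduceUtil.initTableMapperJob("testtb-testExportFileSystemState", new Scan(),
            ExampleMapper.class, ImmutableBytesWritable.class, Result.class, job);
        return job;
      }
    }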
2024-12-09T11:00:03,356 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-09T11:00:03,358 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.6 K 2024-12-09T11:00:03,358 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-09T11:00:03,358 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-09T11:00:03,358 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-09T11:00:03,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742081_1257 (size=1035) 2024-12-09T11:00:03,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742081_1257 (size=1035) 2024-12-09T11:00:03,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742081_1257 (size=1035) 2024-12-09T11:00:03,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742082_1258 (size=35) 2024-12-09T11:00:03,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742082_1258 (size=35) 2024-12-09T11:00:03,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742082_1258 (size=35) 2024-12-09T11:00:03,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742083_1259 (size=304081) 2024-12-09T11:00:03,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742083_1259 (size=304081) 2024-12-09T11:00:03,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742083_1259 (size=304081) 2024-12-09T11:00:03,413 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T11:00:03,413 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T11:00:03,603 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:00:04,032 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0004_000001 (auth:SIMPLE) from 127.0.0.1:60952 2024-12-09T11:00:04,500 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
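At this point ExportSnapshot has loaded the snapshot's hfile list and grouped it into four splits, one per map task, before submitting the copy job to the MiniMRCluster. The test drives the tool programmatically; outside the test harness the same export is roughly the sketch below, assuming ExportSnapshot can be run through ToolRunner the way the HBase test suite invokes it. The -copy-to URI and mapper count are placeholders.

    // Sketch only: submitting a snapshot export equivalent to the job logged above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExportSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to", "hdfs://backup-cluster:8020/hbase",  // placeholder destination
            "-mappers", "4"                                  // one split per mapper, as above
        });
        System.exit(rc);
      }
    }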
2024-12-09T11:00:06,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-09T11:00:06,471 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-09T11:00:06,472 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-09T11:00:06,472 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-09T11:00:09,143 WARN [regionserver/3469f9ca0af3:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 6, running: 0 2024-12-09T11:00:11,975 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:00:14,762 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region cf51409f107b64b6053b05186b328bf2 changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:00:14,762 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e2e56bbf9999b467c03e19ef66dd5b5c changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:00:16,116 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0004_000001 (auth:SIMPLE) from 127.0.0.1:38264 2024-12-09T11:00:16,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742084_1260 (size=349779) 2024-12-09T11:00:16,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742084_1260 (size=349779) 2024-12-09T11:00:16,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742084_1260 (size=349779) 2024-12-09T11:00:18,444 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0004_000001 (auth:SIMPLE) from 127.0.0.1:41340 2024-12-09T11:00:18,450 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0004_000001 (auth:SIMPLE) from 127.0.0.1:45512 2024-12-09T11:00:19,255 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0004_000001 (auth:SIMPLE) from 127.0.0.1:39522 2024-12-09T11:00:19,266 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0004_000001 (auth:SIMPLE) from 127.0.0.1:42160 2024-12-09T11:00:22,238 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0004_01_000006 while processing FINISH_CONTAINERS event 2024-12-09T11:00:22,238 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0004_01_000007 while processing FINISH_CONTAINERS event 2024-12-09T11:00:26,206 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742085_1261 (size=8171) 2024-12-09T11:00:26,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742085_1261 (size=8171) 2024-12-09T11:00:26,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742085_1261 (size=8171) 2024-12-09T11:00:26,739 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_0/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000003/launch_container.sh] 2024-12-09T11:00:26,739 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_0/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000003/container_tokens] 2024-12-09T11:00:26,740 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_0/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000003/sysfs] 2024-12-09T11:00:27,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742087_1263 (size=5101) 2024-12-09T11:00:27,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742087_1263 (size=5101) 2024-12-09T11:00:27,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742087_1263 (size=5101) 2024-12-09T11:00:28,137 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000005/launch_container.sh] 2024-12-09T11:00:28,137 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000005/container_tokens] 2024-12-09T11:00:28,137 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000005/sysfs] 
2024-12-09T11:00:28,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742088_1264 (size=5916) 2024-12-09T11:00:28,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742088_1264 (size=5916) 2024-12-09T11:00:28,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742088_1264 (size=5916) 2024-12-09T11:00:28,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742089_1265 (size=14997) 2024-12-09T11:00:28,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742089_1265 (size=14997) 2024-12-09T11:00:28,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742089_1265 (size=14997) 2024-12-09T11:00:28,486 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000004/launch_container.sh] 2024-12-09T11:00:28,487 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000004/container_tokens] 2024-12-09T11:00:28,487 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000004/sysfs] 2024-12-09T11:00:28,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742086_1262 (size=31751) 2024-12-09T11:00:28,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742086_1262 (size=31751) 2024-12-09T11:00:28,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742086_1262 (size=31751) 2024-12-09T11:00:28,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742090_1266 (size=466) 2024-12-09T11:00:28,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742090_1266 (size=466) 2024-12-09T11:00:28,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742090_1266 (size=466) 2024-12-09T11:00:28,613 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000002/launch_container.sh] 2024-12-09T11:00:28,613 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000002/container_tokens] 2024-12-09T11:00:28,613 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000002/sysfs] 2024-12-09T11:00:28,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742091_1267 (size=31751) 2024-12-09T11:00:28,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742091_1267 (size=31751) 2024-12-09T11:00:28,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742091_1267 (size=31751) 2024-12-09T11:00:28,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742092_1268 (size=349779) 2024-12-09T11:00:28,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742092_1268 (size=349779) 2024-12-09T11:00:28,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742092_1268 (size=349779) 2024-12-09T11:00:28,654 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0004_000001 (auth:SIMPLE) from 127.0.0.1:53344 2024-12-09T11:00:28,661 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0004_000001 (auth:SIMPLE) from 127.0.0.1:53346 2024-12-09T11:00:29,953 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T11:00:29,955 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
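The entries that follow finalize the export and then verify it: TestExportSnapshot(495/500) walks both the source and the exported .hbase-snapshot directories and logs the .snapshotinfo and data.manifest files it finds. A generic recursive listing of an exported snapshot directory looks roughly like the sketch below; the namenode address and path are placeholders for the test's own MiniDFSCluster locations.

    // Sketch only: recursively listing the files under an exported snapshot directory.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListExportedSnapshot {
      public static void main(String[] args) throws Exception {
        Path root = new Path(
            "hdfs://namenode:8020/export-test/.hbase-snapshot/snaptb0-testExportFileSystemState");
        FileSystem fs = root.getFileSystem(new Configuration());
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true); // true = recursive
        while (it.hasNext()) {
          // Expect at least .snapshotinfo and data.manifest, as in the log entries below.
          System.out.println(it.next().getPath());
        }
      }
    }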
2024-12-09T11:00:29,962 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-09T11:00:29,962 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T11:00:29,962 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T11:00:29,963 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-09T11:00:29,963 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-09T11:00:29,963 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-09T11:00:29,963 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742000493/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742000493/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-09T11:00:29,963 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742000493/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-09T11:00:29,963 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742000493/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-09T11:00:29,971 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-09T11:00:29,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-09T11:00:29,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-09T11:00:29,975 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742029975"}]},"ts":"1733742029975"} 2024-12-09T11:00:29,977 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-09T11:00:29,977 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-09T11:00:29,978 INFO 
[PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-09T11:00:29,979 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2e56bbf9999b467c03e19ef66dd5b5c, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=cf51409f107b64b6053b05186b328bf2, UNASSIGN}] 2024-12-09T11:00:29,980 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=cf51409f107b64b6053b05186b328bf2, UNASSIGN 2024-12-09T11:00:29,980 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2e56bbf9999b467c03e19ef66dd5b5c, UNASSIGN 2024-12-09T11:00:29,981 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=cf51409f107b64b6053b05186b328bf2, regionState=CLOSING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:00:29,981 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=e2e56bbf9999b467c03e19ef66dd5b5c, regionState=CLOSING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T11:00:29,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=cf51409f107b64b6053b05186b328bf2, UNASSIGN because future has completed 2024-12-09T11:00:29,983 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:00:29,983 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure cf51409f107b64b6053b05186b328bf2, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:00:29,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2e56bbf9999b467c03e19ef66dd5b5c, UNASSIGN because future has completed 2024-12-09T11:00:29,984 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:00:29,984 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T11:00:30,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-09T11:00:30,136 INFO 
[RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T11:00:30,136 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close cf51409f107b64b6053b05186b328bf2 2024-12-09T11:00:30,136 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:00:30,136 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:00:30,136 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing e2e56bbf9999b467c03e19ef66dd5b5c, disabling compactions & flushes 2024-12-09T11:00:30,136 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing cf51409f107b64b6053b05186b328bf2, disabling compactions & flushes 2024-12-09T11:00:30,136 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T11:00:30,136 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 2024-12-09T11:00:30,136 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T11:00:30,136 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. after waiting 0 ms 2024-12-09T11:00:30,136 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 2024-12-09T11:00:30,136 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 2024-12-09T11:00:30,136 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. after waiting 0 ms 2024-12-09T11:00:30,136 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 
2024-12-09T11:00:30,143 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:00:30,144 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:00:30,144 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2. 2024-12-09T11:00:30,144 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for cf51409f107b64b6053b05186b328bf2: Waiting for close lock at 1733742030136Running coprocessor pre-close hooks at 1733742030136Disabling compacts and flushes for region at 1733742030136Disabling writes for close at 1733742030136Writing region close event to WAL at 1733742030137 (+1 ms)Running coprocessor post-close hooks at 1733742030144 (+7 ms)Closed at 1733742030144 2024-12-09T11:00:30,146 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:00:30,146 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:00:30,146 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c. 
2024-12-09T11:00:30,146 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for e2e56bbf9999b467c03e19ef66dd5b5c: Waiting for close lock at 1733742030136Running coprocessor pre-close hooks at 1733742030136Disabling compacts and flushes for region at 1733742030136Disabling writes for close at 1733742030136Writing region close event to WAL at 1733742030137 (+1 ms)Running coprocessor post-close hooks at 1733742030146 (+9 ms)Closed at 1733742030146 2024-12-09T11:00:30,148 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed cf51409f107b64b6053b05186b328bf2 2024-12-09T11:00:30,149 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=cf51409f107b64b6053b05186b328bf2, regionState=CLOSED 2024-12-09T11:00:30,149 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T11:00:30,150 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=e2e56bbf9999b467c03e19ef66dd5b5c, regionState=CLOSED 2024-12-09T11:00:30,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure cf51409f107b64b6053b05186b328bf2, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:00:30,152 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T11:00:30,154 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-12-09T11:00:30,154 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure cf51409f107b64b6053b05186b328bf2, server=3469f9ca0af3,39691,1733741766880 in 169 msec 2024-12-09T11:00:30,156 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=cf51409f107b64b6053b05186b328bf2, UNASSIGN in 175 msec 2024-12-09T11:00:30,156 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=109 2024-12-09T11:00:30,156 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure e2e56bbf9999b467c03e19ef66dd5b5c, server=3469f9ca0af3,42349,1733741767108 in 169 msec 2024-12-09T11:00:30,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=108 2024-12-09T11:00:30,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2e56bbf9999b467c03e19ef66dd5b5c, UNASSIGN in 177 msec 2024-12-09T11:00:30,160 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-09T11:00:30,160 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 181 msec 2024-12-09T11:00:30,162 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742030162"}]},"ts":"1733742030162"} 2024-12-09T11:00:30,164 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-09T11:00:30,164 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-09T11:00:30,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 194 msec 2024-12-09T11:00:30,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-09T11:00:30,292 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-09T11:00:30,293 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-09T11:00:30,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-09T11:00:30,295 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-09T11:00:30,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-09T11:00:30,297 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-09T11:00:30,299 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-09T11:00:30,301 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T11:00:30,302 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2 2024-12-09T11:00:30,304 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/cf, FileablePath, 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/recovered.edits] 2024-12-09T11:00:30,304 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/recovered.edits] 2024-12-09T11:00:30,312 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/cf/bae0ff9b21e743f28ff34ed55b8b1670 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/cf/bae0ff9b21e743f28ff34ed55b8b1670 2024-12-09T11:00:30,312 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/cf/b5a3cb5ff3b94b04817978be5a728f96 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/cf/b5a3cb5ff3b94b04817978be5a728f96 2024-12-09T11:00:30,316 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2/recovered.edits/9.seqid 2024-12-09T11:00:30,316 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/cf51409f107b64b6053b05186b328bf2 2024-12-09T11:00:30,319 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c/recovered.edits/9.seqid 2024-12-09T11:00:30,319 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemState/e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T11:00:30,319 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-09T11:00:30,320 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 
2024-12-09T11:00:30,321 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-12-09T11:00:30,326 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024120924918517d06c4d9a94161c0c92267a82_cf51409f107b64b6053b05186b328bf2 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024120924918517d06c4d9a94161c0c92267a82_cf51409f107b64b6053b05186b328bf2 2024-12-09T11:00:30,328 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241209730debba470f42cdbdbb3fb5f28284d4_e2e56bbf9999b467c03e19ef66dd5b5c to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241209730debba470f42cdbdbb3fb5f28284d4_e2e56bbf9999b467c03e19ef66dd5b5c 2024-12-09T11:00:30,329 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-09T11:00:30,332 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-09T11:00:30,336 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-09T11:00:30,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T11:00:30,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T11:00:30,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T11:00:30,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T11:00:30,347 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemState with data PBUF 2024-12-09T11:00:30,347 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-09T11:00:30,348 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-09T11:00:30,348 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-09T11:00:30,349 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-09T11:00:30,350 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-09T11:00:30,350 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-12-09T11:00:30,351 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742030350"}]},"ts":"9223372036854775807"} 2024-12-09T11:00:30,351 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742030350"}]},"ts":"9223372036854775807"} 2024-12-09T11:00:30,353 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T11:00:30,353 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => e2e56bbf9999b467c03e19ef66dd5b5c, NAME => 'testtb-testExportFileSystemState,,1733741998822.e2e56bbf9999b467c03e19ef66dd5b5c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => cf51409f107b64b6053b05186b328bf2, NAME => 'testtb-testExportFileSystemState,1,1733741998822.cf51409f107b64b6053b05186b328bf2.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T11:00:30,354 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 
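On the client side, the disable-then-delete sequence driven by pid=107 and pid=113 above corresponds to two Admin calls. A hedged sketch with the standard HBase client API; the ZooKeeper quorum setting is an assumption for illustration, while the table name is the one from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed quorum; the mini-cluster in this log uses 127.0.0.1:57831.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemState");
      // DisableTableProcedure and DeleteTableProcedure run on the master behind these two calls.
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);
        }
        admin.deleteTable(table);
      }
    }
  }
}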
2024-12-09T11:00:30,354 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733742030354"}]},"ts":"9223372036854775807"} 2024-12-09T11:00:30,356 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-09T11:00:30,357 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-09T11:00:30,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 64 msec 2024-12-09T11:00:30,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T11:00:30,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T11:00:30,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:00:30,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T11:00:30,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:00:30,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T11:00:30,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:00:30,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:00:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-09T11:00:30,379 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-09T11:00:30,379 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-09T11:00:30,388 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-09T11:00:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-09T11:00:30,393 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-09T11:00:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-09T11:00:30,418 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=795 (was 793) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39057 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1359318497) connection to localhost/127.0.0.1:42159 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:60954 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:35522 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4017 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (1359318497) connection to localhost/127.0.0.1:42159 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42159 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42159 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:40042 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_270068513_1 at /127.0.0.1:40024 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 9172) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_270068513_1 at /127.0.0.1:35504 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1359318497) connection to localhost/127.0.0.1:39057 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=807 (was 801) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1354 (was 1269) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 14) - ProcessCount LEAK? -, AvailableMemoryMB=3078 (was 3488) 2024-12-09T11:00:30,418 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-09T11:00:30,438 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=795, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=1354, ProcessCount=17, AvailableMemoryMB=3077 2024-12-09T11:00:30,438 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-09T11:00:30,439 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:00:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-09T11:00:30,442 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:00:30,442 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-09T11:00:30,442 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:00:30,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-09T11:00:30,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742093_1269 (size=440) 2024-12-09T11:00:30,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742093_1269 (size=440) 2024-12-09T11:00:30,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742093_1269 (size=440) 2024-12-09T11:00:30,455 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6b4e91789c9e6871014ad3a1098f268b, NAME => 
'testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:00:30,463 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e4d7e2d2271822e45776321fc76f1589, NAME => 'testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:00:30,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742094_1270 (size=65) 2024-12-09T11:00:30,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742094_1270 (size=65) 2024-12-09T11:00:30,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742094_1270 (size=65) 2024-12-09T11:00:30,493 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:00:30,493 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 6b4e91789c9e6871014ad3a1098f268b, disabling compactions & flushes 2024-12-09T11:00:30,493 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:00:30,493 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:00:30,493 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 
after waiting 0 ms 2024-12-09T11:00:30,493 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:00:30,493 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:00:30,493 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6b4e91789c9e6871014ad3a1098f268b: Waiting for close lock at 1733742030493Disabling compacts and flushes for region at 1733742030493Disabling writes for close at 1733742030493Writing region close event to WAL at 1733742030493Closed at 1733742030493 2024-12-09T11:00:30,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742095_1271 (size=65) 2024-12-09T11:00:30,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742095_1271 (size=65) 2024-12-09T11:00:30,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742095_1271 (size=65) 2024-12-09T11:00:30,500 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:00:30,500 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing e4d7e2d2271822e45776321fc76f1589, disabling compactions & flushes 2024-12-09T11:00:30,500 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 2024-12-09T11:00:30,500 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 2024-12-09T11:00:30,500 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. after waiting 0 ms 2024-12-09T11:00:30,500 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 2024-12-09T11:00:30,500 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 
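The CreateTableProcedure above is driven by a client call whose descriptor matches the attributes printed earlier (VERSIONS '1', IS_MOB 'true', MOB_THRESHOLD '0'). A rough sketch of building such a MOB-enabled table with the HBase 2.x descriptor builders, not the test's own helper code; the connection setup is assumed, while the table name, family name, and the single split at '1' (yielding the two regions seen in this log) come from the run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // One version, MOB enabled with threshold 0 so every cell value is written as a MOB file.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
          .setColumnFamily(cf)
          .build();
      // One split key at '1' produces the two regions ('' -> '1' and '1' -> '') initialized above.
      admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
    }
  }
}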
2024-12-09T11:00:30,500 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for e4d7e2d2271822e45776321fc76f1589: Waiting for close lock at 1733742030500Disabling compacts and flushes for region at 1733742030500Disabling writes for close at 1733742030500Writing region close event to WAL at 1733742030500Closed at 1733742030500 2024-12-09T11:00:30,502 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:00:30,502 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733742030502"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742030502"}]},"ts":"1733742030502"} 2024-12-09T11:00:30,502 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733742030502"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742030502"}]},"ts":"1733742030502"} 2024-12-09T11:00:30,505 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T11:00:30,506 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:00:30,506 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742030506"}]},"ts":"1733742030506"} 2024-12-09T11:00:30,509 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-09T11:00:30,509 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T11:00:30,510 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:00:30,511 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T11:00:30,511 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T11:00:30,511 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:00:30,511 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:00:30,511 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T11:00:30,511 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T11:00:30,511 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:00:30,511 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T11:00:30,511 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T11:00:30,511 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6b4e91789c9e6871014ad3a1098f268b, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e4d7e2d2271822e45776321fc76f1589, ASSIGN}] 2024-12-09T11:00:30,512 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6b4e91789c9e6871014ad3a1098f268b, ASSIGN 2024-12-09T11:00:30,512 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e4d7e2d2271822e45776321fc76f1589, ASSIGN 2024-12-09T11:00:30,513 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e4d7e2d2271822e45776321fc76f1589, ASSIGN; state=OFFLINE, location=3469f9ca0af3,39691,1733741766880; forceNewPlan=false, retain=false 2024-12-09T11:00:30,513 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6b4e91789c9e6871014ad3a1098f268b, ASSIGN; state=OFFLINE, location=3469f9ca0af3,42349,1733741767108; forceNewPlan=false, retain=false 2024-12-09T11:00:30,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-09T11:00:30,664 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
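Once these ASSIGN procedures and the OpenRegionProcedure children that follow complete, a client can observe where each region landed. A small sketch using RegionLocator; the connection setup is assumed, and the server names in the comment are the ones chosen by the balancer in this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testConsecutiveExports"))) {
      // Prints one line per region, e.g. 6b4e91789c9e6871014ad3a1098f268b -> 3469f9ca0af3,42349,...
      // and e4d7e2d2271822e45776321fc76f1589 -> 3469f9ca0af3,39691,... in this run.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}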
2024-12-09T11:00:30,664 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=6b4e91789c9e6871014ad3a1098f268b, regionState=OPENING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T11:00:30,665 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=e4d7e2d2271822e45776321fc76f1589, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:00:30,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6b4e91789c9e6871014ad3a1098f268b, ASSIGN because future has completed 2024-12-09T11:00:30,667 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6b4e91789c9e6871014ad3a1098f268b, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T11:00:30,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e4d7e2d2271822e45776321fc76f1589, ASSIGN because future has completed 2024-12-09T11:00:30,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4d7e2d2271822e45776321fc76f1589, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:00:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-09T11:00:30,828 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:00:30,828 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 2024-12-09T11:00:30,828 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => e4d7e2d2271822e45776321fc76f1589, NAME => 'testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T11:00:30,828 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 6b4e91789c9e6871014ad3a1098f268b, NAME => 'testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T11:00:30,828 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. service=AccessControlService 2024-12-09T11:00:30,828 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 
service=AccessControlService 2024-12-09T11:00:30,828 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T11:00:30,828 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T11:00:30,829 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:30,829 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:30,829 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:00:30,829 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:00:30,829 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:30,829 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:30,829 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:30,829 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:30,830 INFO [StoreOpener-e4d7e2d2271822e45776321fc76f1589-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:30,832 INFO [StoreOpener-e4d7e2d2271822e45776321fc76f1589-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4d7e2d2271822e45776321fc76f1589 columnFamilyName cf 2024-12-09T11:00:30,833 DEBUG [StoreOpener-e4d7e2d2271822e45776321fc76f1589-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:00:30,834 INFO [StoreOpener-e4d7e2d2271822e45776321fc76f1589-1 {}] regionserver.HStore(327): Store=e4d7e2d2271822e45776321fc76f1589/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:00:30,834 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:30,835 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:30,835 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:30,836 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:30,836 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:30,838 INFO [StoreOpener-6b4e91789c9e6871014ad3a1098f268b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:30,839 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:30,840 INFO [StoreOpener-6b4e91789c9e6871014ad3a1098f268b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6b4e91789c9e6871014ad3a1098f268b columnFamilyName cf 2024-12-09T11:00:30,841 DEBUG [StoreOpener-6b4e91789c9e6871014ad3a1098f268b-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:00:30,841 INFO [StoreOpener-6b4e91789c9e6871014ad3a1098f268b-1 {}] regionserver.HStore(327): Store=6b4e91789c9e6871014ad3a1098f268b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:00:30,841 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:00:30,841 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:30,842 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:30,842 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:30,843 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened e4d7e2d2271822e45776321fc76f1589; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65228446, jitterRate=-0.028020411729812622}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:00:30,843 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:30,844 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for e4d7e2d2271822e45776321fc76f1589: Running coprocessor pre-open hook at 1733742030829Writing region info on filesystem at 1733742030829Initializing all the Stores at 1733742030830 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742030830Cleaning up temporary data from old regions at 1733742030836 (+6 ms)Running coprocessor post-open hooks at 1733742030843 (+7 ms)Region opened successfully at 1733742030844 (+1 ms) 2024-12-09T11:00:30,845 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:30,845 DEBUG 
[RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:30,846 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589., pid=118, masterSystemTime=1733742030821 2024-12-09T11:00:30,853 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:30,854 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 2024-12-09T11:00:30,854 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 2024-12-09T11:00:30,858 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=e4d7e2d2271822e45776321fc76f1589, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:00:30,864 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4d7e2d2271822e45776321fc76f1589, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:00:30,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=116 2024-12-09T11:00:30,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure e4d7e2d2271822e45776321fc76f1589, server=3469f9ca0af3,39691,1733741766880 in 196 msec 2024-12-09T11:00:30,875 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e4d7e2d2271822e45776321fc76f1589, ASSIGN in 359 msec 2024-12-09T11:00:30,878 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:00:30,879 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 6b4e91789c9e6871014ad3a1098f268b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70816185, jitterRate=0.05524338781833649}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:00:30,879 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:30,879 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 
6b4e91789c9e6871014ad3a1098f268b: Running coprocessor pre-open hook at 1733742030829Writing region info on filesystem at 1733742030829Initializing all the Stores at 1733742030830 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742030830Cleaning up temporary data from old regions at 1733742030845 (+15 ms)Running coprocessor post-open hooks at 1733742030879 (+34 ms)Region opened successfully at 1733742030879 2024-12-09T11:00:30,886 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b., pid=117, masterSystemTime=1733742030820 2024-12-09T11:00:30,895 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:00:30,895 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:00:30,900 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=6b4e91789c9e6871014ad3a1098f268b, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T11:00:30,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6b4e91789c9e6871014ad3a1098f268b, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T11:00:30,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=115 2024-12-09T11:00:30,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 6b4e91789c9e6871014ad3a1098f268b, server=3469f9ca0af3,42349,1733741767108 in 243 msec 2024-12-09T11:00:30,923 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=114 2024-12-09T11:00:30,923 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6b4e91789c9e6871014ad3a1098f268b, ASSIGN in 407 msec 2024-12-09T11:00:30,924 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:00:30,924 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742030924"}]},"ts":"1733742030924"} 2024-12-09T11:00:30,932 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 
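The two desiredMaxFileSize values logged for these regions (65228446 and 70816185) are the jittered split thresholds that ConstantSizeRegionSplitPolicy computes when a region opens: the configured maximum store file size is adjusted by the per-region jitterRate. Backing the numbers out suggests a 64 MB (67108864-byte) max-filesize setting in this test; a minimal sketch of that arithmetic, with the base size inferred rather than read from the log:

```java
public class SplitJitterSketch {
  public static void main(String[] args) {
    // Assumed test setting; inferred by reversing the logged values, not shown in the log itself.
    long configuredMaxFileSize = 64L * 1024 * 1024;          // 67108864 bytes
    double jitterRate = -0.028020411729812622;               // logged for e4d7e2d2271822e45776321fc76f1589
    long desiredMaxFileSize =
        configuredMaxFileSize + (long) (configuredMaxFileSize * jitterRate);
    System.out.println(desiredMaxFileSize);                  // ~65228446, matching the log line above
  }
}
```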
2024-12-09T11:00:30,933 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:00:30,934 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-09T11:00:30,942 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-09T11:00:31,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:00:31,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:00:31,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:00:31,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:00:31,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-09T11:00:31,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T11:00:31,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T11:00:31,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T11:00:31,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T11:00:31,167 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-09T11:00:31,167 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-09T11:00:31,172 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-09T11:00:31,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-09T11:00:31,174 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 728 msec 2024-12-09T11:00:31,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-09T11:00:31,583 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-09T11:00:31,583 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T11:00:31,588 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-09T11:00:31,588 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:00:31,588 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:00:31,591 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T11:00:31,601 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T11:00:31,610 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T11:00:31,614 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-09T11:00:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742031614 (current time:1733742031614). 
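CreateTableProcedure pid=114 completes here (in 728 msec) and the client immediately asks for the first snapshot, { ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }. For orientation, a hedged client-side sketch of the kind of calls that produce this sequence, assuming the standard HBase Admin API; the column family settings are taken from the region-open journal above, and the actual test harness code is not part of this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAndSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // Single MOB-enabled family 'cf' with VERSIONS=1, as in the descriptor logged above.
      admin.createTable(
          TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)
                  .setMobThreshold(0)
                  .setMaxVersions(1)
                  .build())
              .build(),
          new byte[][] { Bytes.toBytes("1") });   // two regions: [,1) and [1,)
      // FLUSH-type snapshot of the still-empty table, matching the
      // MasterRpcServices(1763) request logged above.
      admin.snapshot("emptySnaptb0-testConsecutiveExports", table);
    }
  }
}
```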
2024-12-09T11:00:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:00:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-09T11:00:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:00:31,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@317d3503, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:00:31,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:00:31,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:00:31,618 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:00:31,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:00:31,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:00:31,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@508641d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:00:31,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:00:31,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:00:31,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:00:31,620 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48144, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:00:31,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fa52e80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:00:31,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:00:31,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T11:00:31,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:00:31,634 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39030, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:00:31,636 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 2024-12-09T11:00:31,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:00:31,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:00:31,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:00:31,636 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T11:00:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a8d3982, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:00:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:00:31,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:00:31,643 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:00:31,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:00:31,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:00:31,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1159a71a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:00:31,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:00:31,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:00:31,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:00:31,646 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48168, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:00:31,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6df16145, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:00:31,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:00:31,652 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T11:00:31,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:00:31,654 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39036, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
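Each short-lived connection in these entries logs the same client RPC settings (connectTO=10000, readTO=20000, writeTO=60000, maxRetries=0, fallbackAllowed=true). As far as the stock client goes, the three timeouts correspond to the hbase.ipc.client.socket.timeout.* properties, and the logged values match their defaults; a sketch of setting them explicitly (property names assumed from the standard RpcClient configuration):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ClientSocketTimeouts {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Socket-level timeouts behind connectTO / readTO / writeTO in the log lines above.
    conf.setInt("hbase.ipc.client.socket.timeout.connect", 10_000);
    conf.setInt("hbase.ipc.client.socket.timeout.read", 20_000);
    conf.setInt("hbase.ipc.client.socket.timeout.write", 60_000);
    System.out.println(conf.getInt("hbase.ipc.client.socket.timeout.read", -1));
  }
}
```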
2024-12-09T11:00:31,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:00:31,661 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 2024-12-09T11:00:31,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:00:31,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:00:31,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:00:31,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 
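The jenkins: RWXCA entry read back here is the table ACL written for the table creator during CREATE_TABLE_POST_OPERATION; writeAclToSnapshotDescription (visible in the call stack above) copies it into the snapshot descriptor so the snapshot carries its permissions along. The same kind of entry can also be created explicitly through the public AccessControlClient API; a sketch, not the code that produced this log:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermsSketch {
  // Grants the full RWXCA set on the table to user "jenkins" (family/qualifier
  // left null for a table-wide grant). PermissionStorage persists the entry under
  // hbase:acl and ZKPermissionWatcher fans the change out to every region server,
  // which is the NodeDataChanged traffic seen above.
  static void grantAll(Connection conn) throws Throwable {
    AccessControlClient.grant(conn, TableName.valueOf("testtb-testConsecutiveExports"),
        "jenkins", null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}
```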
2024-12-09T11:00:31,662 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:00:31,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T11:00:31,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-09T11:00:31,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-09T11:00:31,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-09T11:00:31,673 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:00:31,678 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:00:31,683 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:00:31,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742096_1272 (size=161) 2024-12-09T11:00:31,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742096_1272 (size=161) 2024-12-09T11:00:31,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742096_1272 (size=161) 2024-12-09T11:00:31,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-09T11:00:31,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-09T11:00:32,166 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:00:32,166 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 6b4e91789c9e6871014ad3a1098f268b}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4d7e2d2271822e45776321fc76f1589}] 2024-12-09T11:00:32,167 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:32,168 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:32,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-09T11:00:32,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-09T11:00:32,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-09T11:00:32,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:00:32,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 6b4e91789c9e6871014ad3a1098f268b: 2024-12-09T11:00:32,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. for emptySnaptb0-testConsecutiveExports completed. 2024-12-09T11:00:32,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-09T11:00:32,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:00:32,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T11:00:32,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 2024-12-09T11:00:32,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for e4d7e2d2271822e45776321fc76f1589: 2024-12-09T11:00:32,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 
for emptySnaptb0-testConsecutiveExports completed. 2024-12-09T11:00:32,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-09T11:00:32,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:00:32,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T11:00:32,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742098_1274 (size=68) 2024-12-09T11:00:32,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742098_1274 (size=68) 2024-12-09T11:00:32,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742098_1274 (size=68) 2024-12-09T11:00:32,397 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 2024-12-09T11:00:32,397 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-09T11:00:32,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-09T11:00:32,398 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:32,398 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:32,404 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e4d7e2d2271822e45776321fc76f1589 in 236 msec 2024-12-09T11:00:32,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742097_1273 (size=68) 2024-12-09T11:00:32,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742097_1273 (size=68) 2024-12-09T11:00:32,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742097_1273 (size=68) 2024-12-09T11:00:32,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 
2024-12-09T11:00:32,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-09T11:00:32,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-09T11:00:32,430 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:32,430 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:32,442 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=120, resume processing ppid=119 2024-12-09T11:00:32,442 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6b4e91789c9e6871014ad3a1098f268b in 272 msec 2024-12-09T11:00:32,442 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:00:32,444 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:00:32,458 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T11:00:32,458 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:00:32,458 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:00:32,459 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-09T11:00:32,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742099_1275 (size=60) 2024-12-09T11:00:32,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742099_1275 (size=60) 2024-12-09T11:00:32,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742099_1275 (size=60) 2024-12-09T11:00:32,486 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:00:32,486 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-09T11:00:32,489 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-09T11:00:32,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742100_1276 (size=641) 2024-12-09T11:00:32,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742100_1276 (size=641) 2024-12-09T11:00:32,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742100_1276 (size=641) 2024-12-09T11:00:32,620 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:00:32,633 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:00:32,633 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-09T11:00:32,636 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:00:32,636 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-09T11:00:32,638 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 969 msec 2024-12-09T11:00:32,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-09T11:00:32,815 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-09T11:00:32,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42349 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:00:32,838 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39691 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:00:32,839 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T11:00:32,843 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-09T11:00:32,843 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 
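The HRegion(8528) "writing data ... with WAL disabled" messages above are what a region server logs when writes arrive with WAL durability switched off, typically because the Put was issued with Durability.SKIP_WAL to load rows quickly between snapshots. A hedged sketch of such a write; the row key and value are placeholders, while cf:q matches the qualifier seen in the later flush entries:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  // Issues a single Put that bypasses the WAL; the serving region server then
  // prints the "WAL disabled. Data may be lost in the event of a crash." message.
  static void putWithoutWal(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
      Put put = new Put(Bytes.toBytes("row-0"));                 // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```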
2024-12-09T11:00:32,843 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:00:32,847 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T11:00:32,864 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T11:00:32,877 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T11:00:32,881 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-09T11:00:32,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742032881 (current time:1733742032881). 2024-12-09T11:00:32,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:00:32,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-09T11:00:32,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:00:32,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2148baa1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:00:32,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:00:32,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:00:32,883 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:00:32,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:00:32,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:00:32,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74e3220f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
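Once RawAsyncHBaseAdmin logs "Operation: SNAPSHOT ... completed" (above, for emptySnaptb0-testConsecutiveExports), the snapshot is finished on the master side and visible to clients, and the second request for snaptb0-testConsecutiveExports goes through the same SnapshotDescriptionUtils validation. A small check a client could run at this point, assuming the standard Admin API:

```java
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class SnapshotExistsSketch {
  // Returns true if a completed snapshot with the given name is listed by the master.
  static boolean snapshotExists(Admin admin, String name) throws IOException {
    List<SnapshotDescription> snapshots = admin.listSnapshots();
    return snapshots.stream().anyMatch(s -> name.equals(s.getName()));
  }
}
```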
2024-12-09T11:00:32,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:00:32,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:00:32,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:00:32,885 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48182, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:00:32,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cbe70c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:00:32,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:00:32,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T11:00:32,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:00:32,889 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39048, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:00:32,891 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T11:00:32,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:00:32,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:00:32,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:00:32,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f85bc2b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:00:32,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:00:32,892 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T11:00:32,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:00:32,894 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:00:32,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:00:32,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:00:32,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73cea02e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:00:32,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:00:32,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:00:32,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:00:32,897 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48198, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:00:32,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42486e9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:00:32,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:00:32,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1] 2024-12-09T11:00:32,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:00:32,901 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39050, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:00:32,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:00:32,905 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T11:00:32,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:00:32,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:00:32,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:00:32,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-09T11:00:32,907 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:00:32,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
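The second snapshot, snaptb0-testConsecutiveExports, is registered in the entries that follow and is presumably the one the consecutive-export test later copies out; the export step itself is not part of this excerpt. For context, a snapshot export is normally driven by the ExportSnapshot tool; a hypothetical invocation, with the destination URI as a placeholder rather than a path from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to the CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/export-1
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/export-1"       // placeholder destination
    });
    System.exit(rc);
  }
}
```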
2024-12-09T11:00:32,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-09T11:00:32,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-09T11:00:32,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-09T11:00:32,910 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:00:32,912 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:00:32,924 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:00:32,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742101_1277 (size=156) 2024-12-09T11:00:32,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742101_1277 (size=156) 2024-12-09T11:00:32,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742101_1277 (size=156) 2024-12-09T11:00:33,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-09T11:00:33,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-09T11:00:33,388 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:00:33,388 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6b4e91789c9e6871014ad3a1098f268b}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4d7e2d2271822e45776321fc76f1589}] 2024-12-09T11:00:33,390 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:33,390 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:33,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-09T11:00:33,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-09T11:00:33,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:00:33,543 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 6b4e91789c9e6871014ad3a1098f268b 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-09T11:00:33,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-09T11:00:33,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 2024-12-09T11:00:33,547 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing e4d7e2d2271822e45776321fc76f1589 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-09T11:00:33,590 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209118e179f4a9545d29eb38127d5bb75d8_6b4e91789c9e6871014ad3a1098f268b is 71, key is 03ea37a2285343b37ea195c766be14d6/cf:q/1733742032825/Put/seqid=0 2024-12-09T11:00:33,590 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412094c73b75e33934c839e5a87384151c4e8_e4d7e2d2271822e45776321fc76f1589 is 71, key is 138b8d9f76aa27891d7f4fdbbefcf16c/cf:q/1733742032828/Put/seqid=0 2024-12-09T11:00:33,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742102_1278 (size=8241) 2024-12-09T11:00:33,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742102_1278 (size=8241) 2024-12-09T11:00:33,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742102_1278 (size=8241) 2024-12-09T11:00:33,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to 
blk_1073742103_1279 (size=5032) 2024-12-09T11:00:33,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742103_1279 (size=5032) 2024-12-09T11:00:33,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742103_1279 (size=5032) 2024-12-09T11:00:33,634 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-09T11:00:34,018 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:00:34,021 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:00:34,034 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209118e179f4a9545d29eb38127d5bb75d8_6b4e91789c9e6871014ad3a1098f268b to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241209118e179f4a9545d29eb38127d5bb75d8_6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:34,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/.tmp/cf/c6175a15d1e845c291e93c1e8fb479f4, store: [table=testtb-testConsecutiveExports family=cf region=6b4e91789c9e6871014ad3a1098f268b] 2024-12-09T11:00:34,038 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/.tmp/cf/c6175a15d1e845c291e93c1e8fb479f4 is 206, key is 0f952f4cdfee5814d52180249f9a18bb8/cf:q/1733742032825/Put/seqid=0 2024-12-09T11:00:34,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-09T11:00:34,059 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412094c73b75e33934c839e5a87384151c4e8_e4d7e2d2271822e45776321fc76f1589 to 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b202412094c73b75e33934c839e5a87384151c4e8_e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:34,061 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/.tmp/cf/4a16fe9df5924f5694a13837aec0957f, store: [table=testtb-testConsecutiveExports family=cf region=e4d7e2d2271822e45776321fc76f1589] 2024-12-09T11:00:34,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/.tmp/cf/4a16fe9df5924f5694a13837aec0957f is 206, key is 197ebf551e7688eaa33e39b5e76210dd9/cf:q/1733742032828/Put/seqid=0 2024-12-09T11:00:34,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742105_1281 (size=15055) 2024-12-09T11:00:34,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742105_1281 (size=15055) 2024-12-09T11:00:34,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742105_1281 (size=15055) 2024-12-09T11:00:34,119 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/.tmp/cf/4a16fe9df5924f5694a13837aec0957f 2024-12-09T11:00:34,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/.tmp/cf/4a16fe9df5924f5694a13837aec0957f as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/cf/4a16fe9df5924f5694a13837aec0957f 2024-12-09T11:00:34,134 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/cf/4a16fe9df5924f5694a13837aec0957f, entries=48, sequenceid=6, filesize=14.7 K 2024-12-09T11:00:34,135 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for e4d7e2d2271822e45776321fc76f1589 in 588ms, sequenceid=6, compaction requested=false 2024-12-09T11:00:34,135 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for e4d7e2d2271822e45776321fc76f1589: 2024-12-09T11:00:34,135 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. for snaptb0-testConsecutiveExports completed. 2024-12-09T11:00:34,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-09T11:00:34,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:00:34,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/cf/4a16fe9df5924f5694a13837aec0957f] hfiles 2024-12-09T11:00:34,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/cf/4a16fe9df5924f5694a13837aec0957f for snapshot=snaptb0-testConsecutiveExports 2024-12-09T11:00:34,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742104_1280 (size=5700) 2024-12-09T11:00:34,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742104_1280 (size=5700) 2024-12-09T11:00:34,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742104_1280 (size=5700) 2024-12-09T11:00:34,153 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/.tmp/cf/c6175a15d1e845c291e93c1e8fb479f4 2024-12-09T11:00:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/.tmp/cf/c6175a15d1e845c291e93c1e8fb479f4 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/cf/c6175a15d1e845c291e93c1e8fb479f4 2024-12-09T11:00:34,182 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/cf/c6175a15d1e845c291e93c1e8fb479f4, entries=2, sequenceid=6, filesize=5.6 K 2024-12-09T11:00:34,184 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 6b4e91789c9e6871014ad3a1098f268b in 641ms, sequenceid=6, compaction requested=false 2024-12-09T11:00:34,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 6b4e91789c9e6871014ad3a1098f268b: 2024-12-09T11:00:34,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. for snaptb0-testConsecutiveExports completed. 2024-12-09T11:00:34,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-09T11:00:34,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:00:34,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/cf/c6175a15d1e845c291e93c1e8fb479f4] hfiles 2024-12-09T11:00:34,184 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/cf/c6175a15d1e845c291e93c1e8fb479f4 for snapshot=snaptb0-testConsecutiveExports 2024-12-09T11:00:34,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742106_1282 (size=107) 2024-12-09T11:00:34,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742106_1282 (size=107) 2024-12-09T11:00:34,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742106_1282 (size=107) 2024-12-09T11:00:34,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 
2024-12-09T11:00:34,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-12-09T11:00:34,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-12-09T11:00:34,237 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:34,237 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:34,241 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e4d7e2d2271822e45776321fc76f1589 in 851 msec 2024-12-09T11:00:34,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742107_1283 (size=107) 2024-12-09T11:00:34,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742107_1283 (size=107) 2024-12-09T11:00:34,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742107_1283 (size=107) 2024-12-09T11:00:34,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 
2024-12-09T11:00:34,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-09T11:00:34,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-09T11:00:34,279 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:34,279 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:34,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=123, resume processing ppid=122 2024-12-09T11:00:34,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6b4e91789c9e6871014ad3a1098f268b in 892 msec 2024-12-09T11:00:34,283 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:00:34,284 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:00:34,297 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T11:00:34,297 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:00:34,297 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:00:34,300 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b202412094c73b75e33934c839e5a87384151c4e8_e4d7e2d2271822e45776321fc76f1589, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241209118e179f4a9545d29eb38127d5bb75d8_6b4e91789c9e6871014ad3a1098f268b] hfiles 2024-12-09T11:00:34,300 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b202412094c73b75e33934c839e5a87384151c4e8_e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:00:34,300 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241209118e179f4a9545d29eb38127d5bb75d8_6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:00:34,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742108_1284 (size=291) 2024-12-09T11:00:34,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742108_1284 (size=291) 2024-12-09T11:00:34,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742108_1284 (size=291) 2024-12-09T11:00:34,385 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:00:34,385 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-09T11:00:34,386 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-09T11:00:34,502 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-09T11:00:34,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742109_1285 (size=951) 2024-12-09T11:00:34,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742109_1285 (size=951) 2024-12-09T11:00:34,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742109_1285 (size=951) 2024-12-09T11:00:34,542 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:00:34,572 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:00:34,572 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-09T11:00:34,577 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:00:34,577 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-09T11:00:34,580 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 1.6700 sec 2024-12-09T11:00:34,793 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0004_000001 (auth:SIMPLE) from 127.0.0.1:43974 2024-12-09T11:00:35,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-09T11:00:35,062 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-09T11:00:35,063 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062 2024-12-09T11:00:35,063 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, 
tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062, srcFsUri=hdfs://localhost:35869, srcDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:00:35,101 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35869, inputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:00:35,101 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@1d575971, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-09T11:00:35,103 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T11:00:35,113 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-09T11:00:35,238 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:35,239 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:35,239 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:35,690 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:00:36,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-09T11:00:36,469 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-09T11:00:36,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-09T11:00:36,527 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-12582164541553961294.jar 2024-12-09T11:00:36,528 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:36,528 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:36,597 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-16222933575126908421.jar 2024-12-09T11:00:36,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:36,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:36,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:36,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:36,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:36,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:00:36,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T11:00:36,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T11:00:36,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T11:00:36,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T11:00:36,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T11:00:36,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T11:00:36,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T11:00:36,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T11:00:36,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T11:00:36,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T11:00:36,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T11:00:36,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:00:36,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:00:36,604 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:00:36,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:00:36,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:00:36,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:00:36,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:00:36,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742110_1286 (size=24020) 2024-12-09T11:00:36,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742110_1286 (size=24020) 2024-12-09T11:00:36,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742110_1286 (size=24020) 2024-12-09T11:00:36,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742111_1287 (size=77755) 2024-12-09T11:00:36,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742111_1287 (size=77755) 2024-12-09T11:00:36,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742111_1287 (size=77755) 2024-12-09T11:00:36,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742112_1288 (size=131360) 2024-12-09T11:00:36,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742112_1288 (size=131360) 2024-12-09T11:00:36,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742112_1288 (size=131360) 2024-12-09T11:00:36,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742113_1289 (size=111793) 2024-12-09T11:00:36,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742113_1289 (size=111793) 2024-12-09T11:00:36,771 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742113_1289 (size=111793) 2024-12-09T11:00:36,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742114_1290 (size=6425021) 2024-12-09T11:00:36,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742114_1290 (size=6425021) 2024-12-09T11:00:36,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742114_1290 (size=6425021) 2024-12-09T11:00:36,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742115_1291 (size=1832290) 2024-12-09T11:00:36,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742115_1291 (size=1832290) 2024-12-09T11:00:36,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742115_1291 (size=1832290) 2024-12-09T11:00:36,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742116_1292 (size=8360282) 2024-12-09T11:00:36,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742116_1292 (size=8360282) 2024-12-09T11:00:36,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742116_1292 (size=8360282) 2024-12-09T11:00:37,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742117_1293 (size=503880) 2024-12-09T11:00:37,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742117_1293 (size=503880) 2024-12-09T11:00:37,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742117_1293 (size=503880) 2024-12-09T11:00:37,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742118_1294 (size=322274) 2024-12-09T11:00:37,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742118_1294 (size=322274) 2024-12-09T11:00:37,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742118_1294 (size=322274) 2024-12-09T11:00:37,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742119_1295 (size=20406) 2024-12-09T11:00:37,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742119_1295 (size=20406) 2024-12-09T11:00:37,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742119_1295 (size=20406) 2024-12-09T11:00:37,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742120_1296 (size=45609) 2024-12-09T11:00:37,140 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742120_1296 (size=45609) 2024-12-09T11:00:37,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742120_1296 (size=45609) 2024-12-09T11:00:37,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742121_1297 (size=136454) 2024-12-09T11:00:37,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742121_1297 (size=136454) 2024-12-09T11:00:37,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742121_1297 (size=136454) 2024-12-09T11:00:37,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742122_1298 (size=1597136) 2024-12-09T11:00:37,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742122_1298 (size=1597136) 2024-12-09T11:00:37,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742122_1298 (size=1597136) 2024-12-09T11:00:37,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742123_1299 (size=30873) 2024-12-09T11:00:37,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742123_1299 (size=30873) 2024-12-09T11:00:37,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742123_1299 (size=30873) 2024-12-09T11:00:37,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742124_1300 (size=29229) 2024-12-09T11:00:37,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742124_1300 (size=29229) 2024-12-09T11:00:37,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742124_1300 (size=29229) 2024-12-09T11:00:37,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742125_1301 (size=903861) 2024-12-09T11:00:37,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742125_1301 (size=903861) 2024-12-09T11:00:37,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742125_1301 (size=903861) 2024-12-09T11:00:37,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742126_1302 (size=5175431) 2024-12-09T11:00:37,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742126_1302 (size=5175431) 2024-12-09T11:00:37,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742126_1302 (size=5175431) 2024-12-09T11:00:37,496 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742127_1303 (size=232881) 2024-12-09T11:00:37,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742127_1303 (size=232881) 2024-12-09T11:00:37,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742127_1303 (size=232881) 2024-12-09T11:00:37,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742128_1304 (size=1323991) 2024-12-09T11:00:37,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742128_1304 (size=1323991) 2024-12-09T11:00:37,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742128_1304 (size=1323991) 2024-12-09T11:00:37,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742129_1305 (size=4695811) 2024-12-09T11:00:37,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742129_1305 (size=4695811) 2024-12-09T11:00:37,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742129_1305 (size=4695811) 2024-12-09T11:00:38,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742130_1306 (size=1877034) 2024-12-09T11:00:38,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742130_1306 (size=1877034) 2024-12-09T11:00:38,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742130_1306 (size=1877034) 2024-12-09T11:00:38,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742131_1307 (size=217555) 2024-12-09T11:00:38,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742131_1307 (size=217555) 2024-12-09T11:00:38,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742131_1307 (size=217555) 2024-12-09T11:00:38,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742132_1308 (size=443171) 2024-12-09T11:00:38,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742132_1308 (size=443171) 2024-12-09T11:00:38,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742132_1308 (size=443171) 2024-12-09T11:00:38,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742133_1309 (size=4188619) 2024-12-09T11:00:38,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742133_1309 (size=4188619) 
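The long run of "For class ..., using jar ..." entries above comes from TableMapReduceUtil resolving, for each class the export job depends on, the jar that contains it; the surrounding addStoredBlock reports correspond to those jars being written into the job's HDFS staging area. A job driver typically triggers this resolution through TableMapReduceUtil.addDependencyJars; the sketch below is a hypothetical minimal driver, not the test's code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");
        // Resolves the containing jar for each class the job needs and adds it
        // to the job's distributed cache; this resolution is what emits the
        // "For class ..., using jar ..." DEBUG lines seen above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }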
2024-12-09T11:00:38,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742133_1309 (size=4188619) 2024-12-09T11:00:38,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742134_1310 (size=127628) 2024-12-09T11:00:38,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742134_1310 (size=127628) 2024-12-09T11:00:38,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742134_1310 (size=127628) 2024-12-09T11:00:38,753 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-09T11:00:38,773 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-09T11:00:38,792 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.7 K 2024-12-09T11:00:38,792 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-09T11:00:38,792 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-12-09T11:00:38,792 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-12-09T11:00:38,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742135_1311 (size=1023) 2024-12-09T11:00:38,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742135_1311 (size=1023) 2024-12-09T11:00:38,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742135_1311 (size=1023) 2024-12-09T11:00:38,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742136_1312 (size=35) 2024-12-09T11:00:38,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742136_1312 (size=35) 2024-12-09T11:00:38,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742136_1312 (size=35) 2024-12-09T11:00:39,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742137_1313 (size=304128) 2024-12-09T11:00:39,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742137_1313 (size=304128) 2024-12-09T11:00:39,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742137_1313 (size=304128) 2024-12-09T11:00:39,119 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T11:00:39,119 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T11:00:39,611 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0005_000001 (auth:SIMPLE) from 127.0.0.1:57870 2024-12-09T11:00:39,953 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000001/launch_container.sh] 2024-12-09T11:00:39,953 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000001/container_tokens] 2024-12-09T11:00:39,953 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0004/container_1733741775522_0004_01_000001/sysfs] 2024-12-09T11:00:41,973 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:00:49,810 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0005_000001 (auth:SIMPLE) from 127.0.0.1:49672 2024-12-09T11:00:50,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742138_1314 (size=349826) 2024-12-09T11:00:50,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742138_1314 (size=349826) 2024-12-09T11:00:50,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742138_1314 (size=349826) 2024-12-09T11:00:52,102 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0005_000001 (auth:SIMPLE) from 127.0.0.1:46934 2024-12-09T11:00:52,102 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0005_000001 (auth:SIMPLE) from 127.0.0.1:35812 2024-12-09T11:00:52,943 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0005_000001 (auth:SIMPLE) from 127.0.0.1:46942 2024-12-09T11:00:52,949 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0005_000001 (auth:SIMPLE) from 127.0.0.1:35824 2024-12-09T11:00:55,808 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find 
container container_1733741775522_0005_01_000006 while processing FINISH_CONTAINERS event 2024-12-09T11:01:00,525 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000004/launch_container.sh] 2024-12-09T11:01:00,525 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000004/container_tokens] 2024-12-09T11:01:00,525 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000004/sysfs] 2024-12-09T11:01:00,918 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000005/launch_container.sh] 2024-12-09T11:01:00,918 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000005/container_tokens] 2024-12-09T11:01:00,918 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000005/sysfs] 2024-12-09T11:01:01,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742139_1315 (size=31814) 2024-12-09T11:01:01,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742139_1315 (size=31814) 2024-12-09T11:01:01,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742139_1315 (size=31814) 2024-12-09T11:01:01,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742140_1316 (size=463) 2024-12-09T11:01:01,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742140_1316 (size=463) 2024-12-09T11:01:01,130 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742140_1316 (size=463) 2024-12-09T11:01:01,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742141_1317 (size=31814) 2024-12-09T11:01:01,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742141_1317 (size=31814) 2024-12-09T11:01:01,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742141_1317 (size=31814) 2024-12-09T11:01:01,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742142_1318 (size=349826) 2024-12-09T11:01:01,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742142_1318 (size=349826) 2024-12-09T11:01:01,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742142_1318 (size=349826) 2024-12-09T11:01:01,434 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0005_000001 (auth:SIMPLE) from 127.0.0.1:34510 2024-12-09T11:01:03,279 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000002/launch_container.sh] 2024-12-09T11:01:03,279 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000002/container_tokens] 2024-12-09T11:01:03,279 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000002/sysfs] 2024-12-09T11:01:03,462 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T11:01:03,462 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
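The entries above show the first export of 'snaptb0-testConsecutiveExports' being finalized and verified. As a point of reference only: an export like this is normally driven through the ExportSnapshot tool. The sketch below is a minimal, hypothetical illustration of launching it programmatically; it assumes ExportSnapshot can be run through Hadoop's ToolRunner, and the -copy-to destination is a placeholder rather than a path from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    // hbase-site.xml is expected on the classpath.
    Configuration conf = HBaseConfiguration.create();

    // -snapshot and -copy-to are the standard ExportSnapshot options;
    // the destination URI here is a placeholder, not taken from the log.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export"
    });
    System.exit(rc);
  }
}
```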
2024-12-09T11:01:03,502 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-09T11:01:03,502 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T11:01:03,502 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T11:01:03,503 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-09T11:01:03,532 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-09T11:01:03,532 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-09T11:01:03,532 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@1d575971 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-09T11:01:03,533 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-09T11:01:03,533 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-09T11:01:03,535 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062, srcFsUri=hdfs://localhost:35869, srcDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:01:03,629 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35869, inputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:01:03,629 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@1d575971, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-09T11:01:03,634 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T11:01:03,660 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-09T11:01:03,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:03,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:03,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:04,502 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-09T11:01:05,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-14801559023891190561.jar 2024-12-09T11:01:05,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:05,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:05,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-17524070990407681136.jar 2024-12-09T11:01:05,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:05,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:05,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:05,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:05,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:05,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:05,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T11:01:05,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T11:01:05,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T11:01:05,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T11:01:05,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T11:01:05,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T11:01:05,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T11:01:05,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T11:01:05,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T11:01:05,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T11:01:05,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T11:01:05,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:01:05,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:01:05,149 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:01:05,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:01:05,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:01:05,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:01:05,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:01:05,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742143_1319 (size=24020) 2024-12-09T11:01:05,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742143_1319 (size=24020) 2024-12-09T11:01:05,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742143_1319 (size=24020) 2024-12-09T11:01:05,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742144_1320 (size=77755) 2024-12-09T11:01:05,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742144_1320 (size=77755) 2024-12-09T11:01:05,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742144_1320 (size=77755) 2024-12-09T11:01:05,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742145_1321 (size=131360) 2024-12-09T11:01:05,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742145_1321 (size=131360) 2024-12-09T11:01:05,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742145_1321 (size=131360) 2024-12-09T11:01:05,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742146_1322 (size=111793) 2024-12-09T11:01:05,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742146_1322 (size=111793) 2024-12-09T11:01:05,284 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742146_1322 (size=111793) 2024-12-09T11:01:05,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742147_1323 (size=1832290) 2024-12-09T11:01:05,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742147_1323 (size=1832290) 2024-12-09T11:01:05,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742147_1323 (size=1832290) 2024-12-09T11:01:05,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742148_1324 (size=8360282) 2024-12-09T11:01:05,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742148_1324 (size=8360282) 2024-12-09T11:01:05,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742148_1324 (size=8360282) 2024-12-09T11:01:05,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742149_1325 (size=503880) 2024-12-09T11:01:05,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742149_1325 (size=503880) 2024-12-09T11:01:05,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742149_1325 (size=503880) 2024-12-09T11:01:05,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742150_1326 (size=443171) 2024-12-09T11:01:05,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742150_1326 (size=443171) 2024-12-09T11:01:05,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742150_1326 (size=443171) 2024-12-09T11:01:05,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742151_1327 (size=322274) 2024-12-09T11:01:05,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742151_1327 (size=322274) 2024-12-09T11:01:05,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742151_1327 (size=322274) 2024-12-09T11:01:05,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742152_1328 (size=20406) 2024-12-09T11:01:05,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742152_1328 (size=20406) 2024-12-09T11:01:05,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742152_1328 (size=20406) 2024-12-09T11:01:05,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742153_1329 (size=45609) 2024-12-09T11:01:05,475 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742153_1329 (size=45609) 2024-12-09T11:01:05,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742153_1329 (size=45609) 2024-12-09T11:01:05,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742154_1330 (size=136454) 2024-12-09T11:01:05,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742154_1330 (size=136454) 2024-12-09T11:01:05,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742154_1330 (size=136454) 2024-12-09T11:01:05,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742155_1331 (size=1597136) 2024-12-09T11:01:05,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742155_1331 (size=1597136) 2024-12-09T11:01:05,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742155_1331 (size=1597136) 2024-12-09T11:01:05,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742156_1332 (size=30873) 2024-12-09T11:01:05,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742156_1332 (size=30873) 2024-12-09T11:01:05,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742156_1332 (size=30873) 2024-12-09T11:01:05,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742157_1333 (size=29229) 2024-12-09T11:01:05,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742157_1333 (size=29229) 2024-12-09T11:01:05,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742157_1333 (size=29229) 2024-12-09T11:01:05,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742158_1334 (size=903861) 2024-12-09T11:01:05,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742158_1334 (size=903861) 2024-12-09T11:01:05,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742158_1334 (size=903861) 2024-12-09T11:01:05,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742159_1335 (size=6425021) 2024-12-09T11:01:05,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742159_1335 (size=6425021) 2024-12-09T11:01:05,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742159_1335 (size=6425021) 2024-12-09T11:01:05,672 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742160_1336 (size=5175431) 2024-12-09T11:01:05,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742160_1336 (size=5175431) 2024-12-09T11:01:05,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742160_1336 (size=5175431) 2024-12-09T11:01:05,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742161_1337 (size=232881) 2024-12-09T11:01:05,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742161_1337 (size=232881) 2024-12-09T11:01:05,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742161_1337 (size=232881) 2024-12-09T11:01:05,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742162_1338 (size=1323991) 2024-12-09T11:01:05,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742162_1338 (size=1323991) 2024-12-09T11:01:05,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742162_1338 (size=1323991) 2024-12-09T11:01:05,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742163_1339 (size=4695811) 2024-12-09T11:01:05,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742163_1339 (size=4695811) 2024-12-09T11:01:05,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742163_1339 (size=4695811) 2024-12-09T11:01:05,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742164_1340 (size=1877034) 2024-12-09T11:01:05,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742164_1340 (size=1877034) 2024-12-09T11:01:05,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742164_1340 (size=1877034) 2024-12-09T11:01:05,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742165_1341 (size=217555) 2024-12-09T11:01:05,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742165_1341 (size=217555) 2024-12-09T11:01:05,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742165_1341 (size=217555) 2024-12-09T11:01:05,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742166_1342 (size=4188619) 2024-12-09T11:01:05,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742166_1342 (size=4188619) 
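The "No job jar file set. User classes may not be found." warning earlier (and again just below) is emitted by JobResourceUploader when a MapReduce job is submitted without a job jar; the TableMapReduceUtil DEBUG entries above show the dependency jars that are shipped with the job separately. As a hedged sketch only (the job and class names here are hypothetical, not part of this test), a job usually avoids that warning like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class JobJarExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "example-job");

    // The warning's hint ("See Job or Job#setJar(String)") points here:
    // without a job jar the submitter cannot ship user classes.
    job.setJarByClass(JobJarExample.class);

    // Ships HBase and transitive jars with the job; resolving them is what
    // produces the "For class X, using jar Y" DEBUG lines seen above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```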
2024-12-09T11:01:05,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742166_1342 (size=4188619) 2024-12-09T11:01:05,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742167_1343 (size=127628) 2024-12-09T11:01:05,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742167_1343 (size=127628) 2024-12-09T11:01:05,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742167_1343 (size=127628) 2024-12-09T11:01:05,837 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-09T11:01:05,840 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-09T11:01:05,843 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.7 K 2024-12-09T11:01:05,843 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-09T11:01:05,843 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-12-09T11:01:05,843 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-12-09T11:01:05,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742168_1344 (size=1023) 2024-12-09T11:01:05,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742168_1344 (size=1023) 2024-12-09T11:01:05,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742168_1344 (size=1023) 2024-12-09T11:01:05,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742169_1345 (size=35) 2024-12-09T11:01:05,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742169_1345 (size=35) 2024-12-09T11:01:05,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742169_1345 (size=35) 2024-12-09T11:01:05,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742170_1346 (size=304128) 2024-12-09T11:01:05,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742170_1346 (size=304128) 2024-12-09T11:01:05,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742170_1346 (size=304128) 2024-12-09T11:01:06,272 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000003/launch_container.sh] 2024-12-09T11:01:06,273 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000003/container_tokens] 2024-12-09T11:01:06,273 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000003/sysfs] 2024-12-09T11:01:07,005 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-09T11:01:07,073 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=919.14 KB, freeSize=879.10 MB, max=880 MB, blockCount=3, accesses=5, hits=2, hitRatio=40.00%, , cachingAccesses=5, cachingHits=2, cachingHitsRatio=40.00%, evictions=29, evicted=0, evictedPerRun=0.0 2024-12-09T11:01:07,129 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-09T11:01:07,243 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=10, reuseRatio=50.00% 2024-12-09T11:01:07,243 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-09T11:01:07,661 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T11:01:07,662 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T11:01:07,681 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0005_000001 (auth:SIMPLE) from 127.0.0.1:52420 2024-12-09T11:01:07,720 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000001/launch_container.sh] 2024-12-09T11:01:07,720 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000001/container_tokens] 2024-12-09T11:01:07,720 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0005/container_1733741775522_0005_01_000001/sysfs] 2024-12-09T11:01:08,143 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0006_000001 (auth:SIMPLE) from 127.0.0.1:34524 2024-12-09T11:01:09,145 INFO [regionserver/3469f9ca0af3:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-09T11:01:09,149 INFO [regionserver/3469f9ca0af3:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-09T11:01:09,162 INFO [regionserver/3469f9ca0af3:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-09T11:01:10,513 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T11:01:10,513 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e4d7e2d2271822e45776321fc76f1589 changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:01:10,515 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 6b4e91789c9e6871014ad3a1098f268b changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:01:10,519 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-12-09T11:01:10,519 INFO [master/3469f9ca0af3:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-09T11:01:10,519 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 
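The StochasticLoadBalancer entries that follow report an initial weighted average imbalance of 0.4683556592708925 and, after the proposed move, 0.016937562104004985. Both figures match a simple multiplier-weighted average of the per-function imbalances printed alongside them (functions marked "not needed" contribute nothing). The snippet below only recomputes those two numbers from values copied out of the log; it is a verification aid, not balancer code.

```java
public class BalancerImbalanceCheck {
  public static void main(String[] args) {
    // Multipliers in the order printed below: RegionCountSkew, Move,
    // ServerLocality, RackLocality, TableSkew, ReadRequest, CPRequest,
    // WriteRequest, MemStoreSize, StoreFile.
    double[] multipliers = {500, 7, 25, 15, 35, 5, 5, 5, 5, 5};
    // Per-function imbalances before and after the computed plan.
    double[] initial   = {0.5485837703548635, 0, 0, 0, 0, 1.0, 0, 1.0, 0, 0};
    double[] afterPlan = {0, 0.25, 0, 0, 0, 0.8390583358649146, 0, 0.8671617035612909, 0, 0};

    System.out.println(weightedAverage(multipliers, initial));   // ~0.4683556592708925
    System.out.println(weightedAverage(multipliers, afterPlan)); // ~0.016937562104004985
  }

  // sum(multiplier_i * imbalance_i) / sum(multiplier_i)
  static double weightedAverage(double[] m, double[] c) {
    double num = 0, den = 0;
    for (int i = 0; i < m.length; i++) {
      num += m[i] * c[i];
      den += m[i];
    }
    return num / den;
  }
}
```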
2024-12-09T11:01:10,520 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T11:01:10,520 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:01:10,520 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 3 regions 2024-12-09T11:01:10,520 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 1 regions 2024-12-09T11:01:10,520 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:01:10,520 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:01:10,520 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T11:01:10,520 INFO [master/3469f9ca0af3:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T11:01:10,520 INFO [master/3469f9ca0af3:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:01:10,520 INFO [master/3469f9ca0af3:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T11:01:10,520 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=3, number of hosts=1, number of racks=1 2024-12-09T11:01:10,549 INFO [master/3469f9ca0af3:0.Chore.1 {}] balancer.StochasticLoadBalancer(370): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5485837703548635, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-09T11:01:10,549 INFO [master/3469f9ca0af3:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.4683556592708925, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5485837703548635, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=9600 2024-12-09T11:01:10,987 INFO [master/3469f9ca0af3:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. 
Computation took 466 ms to try 9600 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.4683556592708925 to a new imbalance of 0.016937562104004985. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.25, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8390583358649146, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8671617035612909, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-09T11:01:10,999 INFO [master/3469f9ca0af3:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 1, the balance interval is 300000 ms, and the max number regions in transition is 4 2024-12-09T11:01:10,999 INFO [master/3469f9ca0af3:0.Chore.1 {}] master.HMaster(2172): balance hri=1588230740, source=3469f9ca0af3,39691,1733741766880, destination=3469f9ca0af3,33293,1733741767044 2024-12-09T11:01:11,003 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-12-09T11:01:11,003 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-12-09T11:01:11,005 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=125 updating hbase:meta row=1588230740, regionState=CLOSING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:01:11,006 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3469f9ca0af3,39691,1733741766880, state=CLOSING 2024-12-09T11:01:11,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:01:11,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:01:11,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:01:11,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:01:11,014 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=125, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-12-09T11:01:11,014 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:01:11,015 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:01:11,015 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:01:11,015 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1588230740, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:01:11,015 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:01:11,017 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:01:11,178 INFO [regionserver/3469f9ca0af3:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/info has an old edit so flush to free WALs after random delay 226483 ms 2024-12-09T11:01:11,185 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] handler.UnassignRegionHandler(122): Close 1588230740 2024-12-09T11:01:11,185 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:01:11,185 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:01:11,185 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:01:11,185 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:01:11,185 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:01:11,185 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:01:11,186 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=41.62 KB heapSize=67.02 KB 2024-12-09T11:01:11,259 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/info/8b7a32eb927043518fe4b322bc475a4d is 181, key is testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589./info:regioninfo/1733742030858/Put/seqid=0 2024-12-09T11:01:11,327 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742171_1347 (size=12261) 2024-12-09T11:01:11,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742171_1347 (size=12261) 2024-12-09T11:01:11,337 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.27 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/info/8b7a32eb927043518fe4b322bc475a4d 2024-12-09T11:01:11,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742171_1347 (size=12261) 2024-12-09T11:01:11,406 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/ns/29cd3781f3084ee7ac788aacc109ce60 is 123, key is testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467./ns:/1733741922614/DeleteFamily/seqid=0 2024-12-09T11:01:11,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742172_1348 (size=6857) 2024-12-09T11:01:11,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742172_1348 (size=6857) 2024-12-09T11:01:11,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742172_1348 (size=6857) 2024-12-09T11:01:11,480 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.41 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/ns/29cd3781f3084ee7ac788aacc109ce60 2024-12-09T11:01:11,522 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/rep_barrier/2aa3a37b0f664bbf944a8482ed64d2e0 is 132, key is testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467./rep_barrier:/1733741922614/DeleteFamily/seqid=0 2024-12-09T11:01:11,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742173_1349 (size=7079) 2024-12-09T11:01:11,608 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/rep_barrier/2aa3a37b0f664bbf944a8482ed64d2e0 2024-12-09T11:01:11,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742173_1349 (size=7079) 2024-12-09T11:01:11,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742173_1349 (size=7079) 2024-12-09T11:01:11,675 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/table/9b92d4a9b20a4540b9967979e6a52c29 is 126, key is testtb-testExportFileSystemStateWithSplitRegion,1,1733741778756.8d369bd75555122cbc103d82c8629467./table:/1733741922614/DeleteFamily/seqid=0 2024-12-09T11:01:11,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742174_1350 (size=7460) 2024-12-09T11:01:11,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742174_1350 (size=7460) 2024-12-09T11:01:11,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742174_1350 (size=7460) 2024-12-09T11:01:11,813 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.48 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/table/9b92d4a9b20a4540b9967979e6a52c29 2024-12-09T11:01:11,829 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/info/8b7a32eb927043518fe4b322bc475a4d as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/info/8b7a32eb927043518fe4b322bc475a4d 2024-12-09T11:01:11,835 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/info/8b7a32eb927043518fe4b322bc475a4d, entries=56, sequenceid=119, filesize=12.0 K 2024-12-09T11:01:11,836 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/ns/29cd3781f3084ee7ac788aacc109ce60 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/ns/29cd3781f3084ee7ac788aacc109ce60 2024-12-09T11:01:11,860 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/ns/29cd3781f3084ee7ac788aacc109ce60, entries=15, sequenceid=119, filesize=6.7 K 2024-12-09T11:01:11,862 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/rep_barrier/2aa3a37b0f664bbf944a8482ed64d2e0 as 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/rep_barrier/2aa3a37b0f664bbf944a8482ed64d2e0 2024-12-09T11:01:11,871 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/rep_barrier/2aa3a37b0f664bbf944a8482ed64d2e0, entries=13, sequenceid=119, filesize=6.9 K 2024-12-09T11:01:11,873 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/table/9b92d4a9b20a4540b9967979e6a52c29 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/table/9b92d4a9b20a4540b9967979e6a52c29 2024-12-09T11:01:11,891 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/table/9b92d4a9b20a4540b9967979e6a52c29, entries=23, sequenceid=119, filesize=7.3 K 2024-12-09T11:01:11,899 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegion(3140): Finished flush of dataSize ~41.62 KB/42621, heapSize ~66.95 KB/68560, currentSize=0 B/0 for 1588230740 in 713ms, sequenceid=119, compaction requested=false 2024-12-09T11:01:11,990 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/recovered.edits/122.seqid, newMaxSeqId=122, maxSeqId=1 2024-12-09T11:01:12,003 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:01:12,003 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:01:12,003 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:01:12,003 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733742071185Running coprocessor pre-close hooks at 1733742071185Disabling compacts and flushes for region at 1733742071185Disabling writes for close at 1733742071185Obtaining lock to block concurrent updates at 1733742071186 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733742071186Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=42621, getHeapSize=68560, getOffHeapSize=0, getCellsCount=331 at 1733742071186Flushing stores of hbase:meta,,1.1588230740 at 1733742071190 (+4 ms)Flushing 1588230740/info: creating writer at 1733742071190Flushing 1588230740/info: appending metadata at 1733742071259 (+69 ms)Flushing 1588230740/info: closing flushed file at 1733742071259Flushing 1588230740/ns: creating 
writer at 1733742071354 (+95 ms)Flushing 1588230740/ns: appending metadata at 1733742071404 (+50 ms)Flushing 1588230740/ns: closing flushed file at 1733742071404Flushing 1588230740/rep_barrier: creating writer at 1733742071492 (+88 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733742071522 (+30 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733742071522Flushing 1588230740/table: creating writer at 1733742071625 (+103 ms)Flushing 1588230740/table: appending metadata at 1733742071675 (+50 ms)Flushing 1588230740/table: closing flushed file at 1733742071675Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@365d6480: reopening flushed file at 1733742071825 (+150 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@551815b3: reopening flushed file at 1733742071836 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6dd5ba5e: reopening flushed file at 1733742071861 (+25 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58f98843: reopening flushed file at 1733742071872 (+11 ms)Finished flush of dataSize ~41.62 KB/42621, heapSize ~66.95 KB/68560, currentSize=0 B/0 for 1588230740 in 713ms, sequenceid=119, compaction requested=false at 1733742071899 (+27 ms)Writing region close event to WAL at 1733742071958 (+59 ms)Running coprocessor post-close hooks at 1733742072003 (+45 ms)Closed at 1733742072003 2024-12-09T11:01:12,004 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] regionserver.HRegionServer(3302): Adding 1588230740 move to 3469f9ca0af3,33293,1733741767044 record at close sequenceid=119 2024-12-09T11:01:12,018 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META, pid=126}] handler.UnassignRegionHandler(157): Closed 1588230740 2024-12-09T11:01:12,022 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=125 updating hbase:meta row=1588230740, regionState=CLOSED 2024-12-09T11:01:12,023 WARN [PEWorker-2 {}] zookeeper.MetaTableLocator(168): Tried to set null ServerName in hbase:meta; skipping -- ServerName required 2024-12-09T11:01:12,023 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=126, ppid=125, state=RUNNABLE, hasLock=true; CloseRegionProcedure 1588230740, server=3469f9ca0af3,39691,1733741766880 2024-12-09T11:01:12,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-09T11:01:12,027 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseRegionProcedure 1588230740, server=3469f9ca0af3,39691,1733741766880 in 1.0080 sec 2024-12-09T11:01:12,028 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE; state=CLOSED, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T11:01:12,180 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
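The entries above trace the flush that HBase performs on hbase:meta before closing it: each column family (info, ns, rep_barrier, table) is written to a .tmp HFile, committed into the family directory, and recorded in the region close journal. The same flush path (DefaultStoreFlusher writing one store file per family) can also be requested from a client through the Admin API. A minimal sketch, assuming a running cluster reachable through the default hbase-site.xml; the class name is illustrative and this is not the test's own code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMetaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region server hosting hbase:meta to flush its memstores to HFiles,
      // producing one store file per column family as in the log above.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}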
2024-12-09T11:01:12,180 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=125 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:01:12,182 INFO [PEWorker-1 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3469f9ca0af3,33293,1733741767044, state=OPENING 2024-12-09T11:01:12,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:01:12,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:01:12,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:01:12,190 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:01:12,191 DEBUG [PEWorker-1 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=125, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-12-09T11:01:12,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=125, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T11:01:12,191 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:01:12,192 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:01:12,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:01:12,198 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:01:12,370 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T11:01:12,370 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T11:01:12,371 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T11:01:12,373 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3469f9ca0af3%2C33293%2C1733741767044.meta, suffix=.meta, 
logDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,33293,1733741767044, archiveDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/oldWALs, maxLogs=32 2024-12-09T11:01:12,408 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,33293,1733741767044/3469f9ca0af3%2C33293%2C1733741767044.meta.1733742072374.meta, exclude list is [], retry=0 2024-12-09T11:01:12,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45147,DS-0ea321db-fa57-4e5a-b83c-91cd41720647,DISK] 2024-12-09T11:01:12,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44813,DS-72528082-dc25-49fc-b312-bbbc6bcb4023,DISK] 2024-12-09T11:01:12,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34611,DS-cbedc1ca-5b7c-4e25-a21f-16d809984ba5,DISK] 2024-12-09T11:01:12,462 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/WALs/3469f9ca0af3,33293,1733741767044/3469f9ca0af3%2C33293%2C1733741767044.meta.1733742072374.meta 2024-12-09T11:01:12,467 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36613:36613),(127.0.0.1/127.0.0.1:38997:38997),(127.0.0.1/127.0.0.1:33003:33003)] 2024-12-09T11:01:12,468 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:01:12,468 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-09T11:01:12,469 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
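The WAL created here uses the fan-out async provider (AsyncFSWALProvider) with a 256 MB block size, a 128 MB roll size and maxLogs=32, writing to a pipeline of three datanodes. A hedged configuration sketch showing the standard keys that control these values; whether the test sets them explicitly or simply inherits defaults is not visible in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AsyncWalConfigSketch {
  public static Configuration asyncWalConf() {
    Configuration conf = HBaseConfiguration.create();
    // Select the fan-out async WAL implementation instead of the classic FSHLog provider.
    conf.set("hbase.wal.provider", "asyncfs");
    // A 256 MB WAL block size with the default 0.5 roll multiplier yields the
    // "blocksize=256 MB, rollsize=128 MB" reported by AbstractFSWAL above.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Allow up to 32 WAL files before flushes are forced (maxLogs=32 in the log).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}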
2024-12-09T11:01:12,469 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T11:01:12,469 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T11:01:12,469 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T11:01:12,469 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T11:01:12,469 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:01:12,469 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T11:01:12,469 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T11:01:12,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:01:12,493 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:01:12,493 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:12,562 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/info/8b7a32eb927043518fe4b322bc475a4d 2024-12-09T11:01:12,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:01:12,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:01:12,578 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:01:12,578 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:12,628 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/ns/29cd3781f3084ee7ac788aacc109ce60 2024-12-09T11:01:12,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:01:12,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:01:12,635 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:01:12,635 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:12,657 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/rep_barrier/2aa3a37b0f664bbf944a8482ed64d2e0 2024-12-09T11:01:12,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:01:12,657 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:01:12,662 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:01:12,662 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:12,715 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/table/9b92d4a9b20a4540b9967979e6a52c29 2024-12-09T11:01:12,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:01:12,715 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:01:12,716 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740 2024-12-09T11:01:12,718 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740 2024-12-09T11:01:12,719 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:01:12,719 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:01:12,720 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
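Each store opened above logs the same CompactionConfiguration line: minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2 (5.0 off-peak), and a 7-day major compaction period with 0.5 jitter. These look like stock defaults; the following sketch maps the logged numbers back to configuration keys, with the property names taken as assumptions about the version under test rather than anything this log confirms:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration compactionDefaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact: 3
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact: 10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period: 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter 0.500000
    return conf;
  }
}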
2024-12-09T11:01:12,722 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:01:12,723 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=123; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66577582, jitterRate=-0.00791671872138977}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T11:01:12,723 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T11:01:12,724 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733742072470Writing region info on filesystem at 1733742072470Initializing all the Stores at 1733742072471 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733742072471Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733742072491 (+20 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742072491Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733742072491Cleaning up temporary data from old regions at 1733742072719 (+228 ms)Running coprocessor post-open hooks at 1733742072723 (+4 ms)Region opened successfully at 1733742072724 (+1 ms) 2024-12-09T11:01:12,731 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=127, masterSystemTime=1733742072357 2024-12-09T11:01:12,734 DEBUG [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T11:01:12,734 INFO [RS_OPEN_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_META, pid=127}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T11:01:12,735 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=125 updating hbase:meta 
row=1588230740, regionState=OPEN, openSeqNum=123, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:01:12,736 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3469f9ca0af3,33293,1733741767044, state=OPEN 2024-12-09T11:01:12,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:01:12,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:01:12,744 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:01:12,744 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:01:12,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:01:12,746 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=127, ppid=125, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3469f9ca0af3,33293,1733741767044 2024-12-09T11:01:12,746 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:01:12,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:01:12,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=125 2024-12-09T11:01:12,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=125, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3469f9ca0af3,33293,1733741767044 in 555 msec 2024-12-09T11:01:12,749 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:01:12,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE in 1.7500 sec 2024-12-09T11:01:12,829 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-09T11:01:12,860 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39691 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Scan size: 123 connection: 172.17.0.2:38899 deadline: 1733742132855, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=3469f9ca0af3 port=33293 startCode=1733741767044. As of locationSeqNum=119. 
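Procedures pid=125/126/127 together implement a REOPEN/MOVE of hbase:meta from 3469f9ca0af3,39691 to 3469f9ca0af3,33293: close the region (with the flush above), update the region state and the ZooKeeper meta location, then open it on the new server; clients that still hold the old location receive RegionMovedException and refresh their cache, as the next entries show. How the test triggers this move is not shown here; the following is only a hedged sketch of requesting an equivalent move through the Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

public class MoveMetaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // 1588230740 is the fixed encoded name of the single hbase:meta region.
      byte[] metaEncodedName = RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
      // Destination region server, e.g. the "3469f9ca0af3,33293,1733741767044" from the log.
      ServerName dest = ServerName.valueOf("3469f9ca0af3", 33293, 1733741767044L);
      // Kicks off a TransitRegionStateProcedure (REOPEN/MOVE) with Close/Open subprocedures,
      // the same shape as pid 125-127 above.
      admin.move(metaEncodedName, dest);
    }
  }
}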
2024-12-09T11:01:12,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=3469f9ca0af3 port=33293 startCode=1733741767044. As of locationSeqNum=119. 2024-12-09T11:01:12,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=3469f9ca0af3 port=33293 startCode=1733741767044. As of locationSeqNum=119. 2024-12-09T11:01:12,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1 with the new location region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=119 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=3469f9ca0af3 port=33293 startCode=1733741767044. As of locationSeqNum=119. 2024-12-09T11:01:12,973 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:01:12,978 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35431, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:01:13,006 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testConsecutiveExports because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-09T11:01:15,663 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:01:15,733 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T11:01:15,829 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6b4e91789c9e6871014ad3a1098f268b, had cached 0 bytes from a total of 5700 2024-12-09T11:01:15,829 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e4d7e2d2271822e45776321fc76f1589, had cached 0 bytes from a total of 15055 2024-12-09T11:01:19,737 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0006_000001 (auth:SIMPLE) from 127.0.0.1:48816 2024-12-09T11:01:20,111 INFO [regionserver/3469f9ca0af3:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 
because a0d08af07fc0beaa578cbd208923b1fb/l has an old edit so flush to free WALs after random delay 116693 ms 2024-12-09T11:01:20,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742176_1352 (size=349826) 2024-12-09T11:01:20,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742176_1352 (size=349826) 2024-12-09T11:01:20,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742176_1352 (size=349826) 2024-12-09T11:01:22,017 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0006_000001 (auth:SIMPLE) from 127.0.0.1:57160 2024-12-09T11:01:22,017 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0006_000001 (auth:SIMPLE) from 127.0.0.1:33428 2024-12-09T11:01:22,873 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0006_000001 (auth:SIMPLE) from 127.0.0.1:57176 2024-12-09T11:01:22,889 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0006_000001 (auth:SIMPLE) from 127.0.0.1:33442 2024-12-09T11:01:25,676 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0006_01_000006 while processing FINISH_CONTAINERS event 2024-12-09T11:01:27,996 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_0/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000002/launch_container.sh] 2024-12-09T11:01:27,996 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_0/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000002/container_tokens] 2024-12-09T11:01:27,996 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_0/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000002/sysfs] 2024-12-09T11:01:30,253 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000004/launch_container.sh] 2024-12-09T11:01:30,253 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000004/container_tokens] 2024-12-09T11:01:30,253 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000004/sysfs] 2024-12-09T11:01:30,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742177_1353 (size=29752) 2024-12-09T11:01:30,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742177_1353 (size=29752) 2024-12-09T11:01:30,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742177_1353 (size=29752) 2024-12-09T11:01:30,411 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000003/launch_container.sh] 2024-12-09T11:01:30,411 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000003/container_tokens] 2024-12-09T11:01:30,411 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000003/sysfs] 2024-12-09T11:01:30,460 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000005/launch_container.sh] 2024-12-09T11:01:30,460 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000005/container_tokens] 2024-12-09T11:01:30,460 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000005/sysfs] 2024-12-09T11:01:30,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742178_1354 (size=463) 2024-12-09T11:01:30,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742178_1354 (size=463) 2024-12-09T11:01:30,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742178_1354 (size=463) 2024-12-09T11:01:30,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742179_1355 (size=29752) 2024-12-09T11:01:30,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742179_1355 (size=29752) 2024-12-09T11:01:30,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742179_1355 (size=29752) 2024-12-09T11:01:30,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742180_1356 (size=349826) 2024-12-09T11:01:30,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742180_1356 (size=349826) 2024-12-09T11:01:30,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742180_1356 (size=349826) 2024-12-09T11:01:30,872 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0006_000001 (auth:SIMPLE) from 127.0.0.1:46594 2024-12-09T11:01:30,880 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0006_000001 (auth:SIMPLE) from 127.0.0.1:55358 2024-12-09T11:01:30,885 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0006_000001 (auth:SIMPLE) from 127.0.0.1:46602 2024-12-09T11:01:32,728 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T11:01:32,728 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
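The entries above are the tail of an ExportSnapshot run for snaptb0-testConsecutiveExports: the MapReduce job has finished and the tool is finalizing the export and verifying the exported snapshot's expiration status and integrity (its completion and the follow-up listing of .snapshotinfo and data.manifest appear just below). A hedged sketch of driving the same tool programmatically as a Hadoop Tool; the target URI is a placeholder rather than the path the test actually uses, and the flag spelling follows the documented command-line form:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest plus the referenced HFiles to the target filesystem
    // via a MapReduce job, then verifies the exported snapshot.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export",   // placeholder target URI
        "-overwrite"
    });
    if (rc != 0) {
      throw new IllegalStateException("ExportSnapshot exited with code " + rc);
    }
  }
}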
2024-12-09T11:01:32,734 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-09T11:01:32,734 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T11:01:32,735 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T11:01:32,735 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-09T11:01:32,743 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-09T11:01:32,743 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-09T11:01:32,743 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@1d575971 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-09T11:01:32,744 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-09T11:01:32,744 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742035062/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-09T11:01:32,765 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-09T11:01:32,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=128, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-09T11:01:32,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=128 2024-12-09T11:01:32,768 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742092768"}]},"ts":"1733742092768"} 2024-12-09T11:01:32,771 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-09T11:01:32,771 INFO [PEWorker-4 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-09T11:01:32,772 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-09T11:01:32,774 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6b4e91789c9e6871014ad3a1098f268b, UNASSIGN}, {pid=131, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e4d7e2d2271822e45776321fc76f1589, UNASSIGN}] 2024-12-09T11:01:32,775 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=131, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e4d7e2d2271822e45776321fc76f1589, UNASSIGN 2024-12-09T11:01:32,775 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6b4e91789c9e6871014ad3a1098f268b, UNASSIGN 2024-12-09T11:01:32,775 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=131 updating hbase:meta row=e4d7e2d2271822e45776321fc76f1589, regionState=CLOSING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:01:32,775 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=130 updating hbase:meta row=6b4e91789c9e6871014ad3a1098f268b, regionState=CLOSING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T11:01:32,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=131, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e4d7e2d2271822e45776321fc76f1589, UNASSIGN because future has completed 2024-12-09T11:01:32,779 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:01:32,779 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE, hasLock=false; CloseRegionProcedure e4d7e2d2271822e45776321fc76f1589, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:01:32,779 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6b4e91789c9e6871014ad3a1098f268b, UNASSIGN because future has completed 2024-12-09T11:01:32,780 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:01:32,780 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=133, ppid=130, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6b4e91789c9e6871014ad3a1098f268b, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T11:01:32,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=128 2024-12-09T11:01:32,931 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=132}] handler.UnassignRegionHandler(122): Close e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:01:32,931 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=132}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:01:32,931 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=132}] regionserver.HRegion(1722): Closing e4d7e2d2271822e45776321fc76f1589, disabling compactions & flushes 2024-12-09T11:01:32,931 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=132}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 2024-12-09T11:01:32,931 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=132}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 2024-12-09T11:01:32,931 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=132}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. after waiting 0 ms 2024-12-09T11:01:32,931 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=132}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 2024-12-09T11:01:32,932 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(122): Close 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:01:32,932 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:01:32,932 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1722): Closing 6b4e91789c9e6871014ad3a1098f268b, disabling compactions & flushes 2024-12-09T11:01:32,932 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:01:32,933 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:01:32,933 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. after waiting 0 ms 2024-12-09T11:01:32,933 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 
2024-12-09T11:01:32,966 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:01:32,967 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:01:32,967 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b. 2024-12-09T11:01:32,967 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1676): Region close journal for 6b4e91789c9e6871014ad3a1098f268b: Waiting for close lock at 1733742092932Running coprocessor pre-close hooks at 1733742092932Disabling compacts and flushes for region at 1733742092932Disabling writes for close at 1733742092933 (+1 ms)Writing region close event to WAL at 1733742092951 (+18 ms)Running coprocessor post-close hooks at 1733742092967 (+16 ms)Closed at 1733742092967 2024-12-09T11:01:32,970 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(157): Closed 6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:01:32,972 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=130 updating hbase:meta row=6b4e91789c9e6871014ad3a1098f268b, regionState=CLOSED 2024-12-09T11:01:32,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=130, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6b4e91789c9e6871014ad3a1098f268b, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T11:01:32,978 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=132}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:01:32,979 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=132}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:01:32,979 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=132}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589. 
2024-12-09T11:01:32,979 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=132}] regionserver.HRegion(1676): Region close journal for e4d7e2d2271822e45776321fc76f1589: Waiting for close lock at 1733742092931Running coprocessor pre-close hooks at 1733742092931Disabling compacts and flushes for region at 1733742092931Disabling writes for close at 1733742092931Writing region close event to WAL at 1733742092950 (+19 ms)Running coprocessor post-close hooks at 1733742092979 (+29 ms)Closed at 1733742092979 2024-12-09T11:01:32,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=130 2024-12-09T11:01:32,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=130, state=SUCCESS, hasLock=false; CloseRegionProcedure 6b4e91789c9e6871014ad3a1098f268b, server=3469f9ca0af3,42349,1733741767108 in 198 msec 2024-12-09T11:01:32,982 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=132}] handler.UnassignRegionHandler(157): Closed e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:01:32,983 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=131 updating hbase:meta row=e4d7e2d2271822e45776321fc76f1589, regionState=CLOSED 2024-12-09T11:01:32,984 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=129, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6b4e91789c9e6871014ad3a1098f268b, UNASSIGN in 208 msec 2024-12-09T11:01:32,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=132, ppid=131, state=RUNNABLE, hasLock=false; CloseRegionProcedure e4d7e2d2271822e45776321fc76f1589, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:01:32,994 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=132, resume processing ppid=131 2024-12-09T11:01:32,994 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, ppid=131, state=SUCCESS, hasLock=false; CloseRegionProcedure e4d7e2d2271822e45776321fc76f1589, server=3469f9ca0af3,39691,1733741766880 in 212 msec 2024-12-09T11:01:32,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=131, resume processing ppid=129 2024-12-09T11:01:32,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, ppid=129, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=e4d7e2d2271822e45776321fc76f1589, UNASSIGN in 220 msec 2024-12-09T11:01:33,000 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=128 2024-12-09T11:01:33,000 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=128, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 225 msec 2024-12-09T11:01:33,002 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742093002"}]},"ts":"1733742093002"} 2024-12-09T11:01:33,008 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-09T11:01:33,009 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 
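DisableTableProcedure pid=128 has now run to completion: the table state is set to DISABLING, CloseTableRegionsProcedure unassigns both regions (pids 129-133), and the state is flipped to DISABLED in hbase:meta. The test drives this from the client through the admin API (the "Operation: DISABLE ... completed" entry from RawAsyncHBaseAdmin appears just below); a minimal synchronous sketch of the equivalent call, assuming a connection built from the default configuration:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // Blocks until the master's DisableTableProcedure and its region-close
      // subprocedures have finished, i.e. until the table is DISABLED in hbase:meta.
      admin.disableTable(table);
    }
  }
}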
2024-12-09T11:01:33,012 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 245 msec 2024-12-09T11:01:33,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=128 2024-12-09T11:01:33,083 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-09T11:01:33,083 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-09T11:01:33,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=134, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-09T11:01:33,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-09T11:01:33,089 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=134, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-09T11:01:33,092 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=134, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-09T11:01:33,096 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-09T11:01:33,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T11:01:33,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T11:01:33,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T11:01:33,101 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-09T11:01:33,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T11:01:33,101 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-09T11:01:33,102 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 
2024-12-09T11:01:33,102 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-09T11:01:33,102 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:01:33,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T11:01:33,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T11:01:33,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:01:33,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:01:33,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T11:01:33,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:01:33,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=134 2024-12-09T11:01:33,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T11:01:33,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:01:33,106 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:01:33,106 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/recovered.edits] 2024-12-09T11:01:33,113 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/recovered.edits] 2024-12-09T11:01:33,116 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/cf/c6175a15d1e845c291e93c1e8fb479f4 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/cf/c6175a15d1e845c291e93c1e8fb479f4 2024-12-09T11:01:33,121 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b/recovered.edits/9.seqid 2024-12-09T11:01:33,121 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/cf/4a16fe9df5924f5694a13837aec0957f to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/cf/4a16fe9df5924f5694a13837aec0957f 2024-12-09T11:01:33,122 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:01:33,125 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589/recovered.edits/9.seqid 2024-12-09T11:01:33,126 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testConsecutiveExports/e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:01:33,126 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-09T11:01:33,127 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-09T11:01:33,128 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-12-09T11:01:33,132 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): 
Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b202412094c73b75e33934c839e5a87384151c4e8_e4d7e2d2271822e45776321fc76f1589 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b202412094c73b75e33934c839e5a87384151c4e8_e4d7e2d2271822e45776321fc76f1589 2024-12-09T11:01:33,134 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241209118e179f4a9545d29eb38127d5bb75d8_6b4e91789c9e6871014ad3a1098f268b to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241209118e179f4a9545d29eb38127d5bb75d8_6b4e91789c9e6871014ad3a1098f268b 2024-12-09T11:01:33,135 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-09T11:01:33,138 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=134, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-09T11:01:33,147 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-09T11:01:33,150 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-09T11:01:33,152 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=134, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-09T11:01:33,153 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
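As the HFileArchiver entries above show, DeleteTableProcedure does not delete store files in place: each region directory, and for this MOB-enabled table the mobdir region as well, is moved under the cluster's archive location (archive/data/<namespace>/<table>/...), and only then are the region rows and table state removed from hbase:meta in the entries that follow. The sketch below is a minimal, hypothetical illustration of the client call that drives this, plus an inspection of an archive directory with the Hadoop FileSystem API; the table name and archive path are placeholders, not values from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableAndListArchive {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-example"); // placeholder
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The table must already be disabled; this submits a DeleteTableProcedure.
      admin.deleteTable(table);
    }
    // Archived HFiles land under <hbase.rootdir>/archive/data/<namespace>/<table>/...;
    // the path below is illustrative, not taken from this run.
    try (FileSystem fs = FileSystem.get(conf)) {
      Path archive = new Path("/hbase/archive/data/default/testtb-example");
      if (fs.exists(archive)) {
        for (FileStatus status : fs.listStatus(archive)) {
          System.out.println(status.getPath());
        }
      }
    }
  }
}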
2024-12-09T11:01:33,153 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742093153"}]},"ts":"9223372036854775807"} 2024-12-09T11:01:33,153 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742093153"}]},"ts":"9223372036854775807"} 2024-12-09T11:01:33,156 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T11:01:33,156 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 6b4e91789c9e6871014ad3a1098f268b, NAME => 'testtb-testConsecutiveExports,,1733742030439.6b4e91789c9e6871014ad3a1098f268b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e4d7e2d2271822e45776321fc76f1589, NAME => 'testtb-testConsecutiveExports,1,1733742030439.e4d7e2d2271822e45776321fc76f1589.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T11:01:33,156 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-12-09T11:01:33,157 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733742093156"}]},"ts":"9223372036854775807"} 2024-12-09T11:01:33,162 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-09T11:01:33,163 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=134, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-09T11:01:33,165 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 80 msec 2024-12-09T11:01:33,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=134 2024-12-09T11:01:33,213 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-09T11:01:33,213 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-09T11:01:33,222 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-09T11:01:33,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-09T11:01:33,227 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-09T11:01:33,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-09T11:01:33,265 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=802 (was 
795) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:33220 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1072384417_22 at /127.0.0.1:43070 [Receiving block BP-1191631881-172.17.0.2-1733741760972:blk_1073742175_1351] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6-prefix:3469f9ca0af3,33293,1733741767044.meta java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1191631881-172.17.0.2-1733741760972:blk_1073742175_1351, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1072384417_22 at /127.0.0.1:36490 [Receiving block BP-1191631881-172.17.0.2-1733741760972:blk_1073742175_1351] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
ProcedureExecutor-Async-Task-Executor-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5553 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1072384417_22 at /127.0.0.1:41692 [Receiving block BP-1191631881-172.17.0.2-1733741760972:blk_1073742175_1351] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1191631881-172.17.0.2-1733741760972:blk_1073742175_1351, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:56756 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2065711171_1 at /127.0.0.1:50062 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1191631881-172.17.0.2-1733741760972:blk_1073742175_1351, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2065711171_1 at /127.0.0.1:33192 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 16615) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:50078 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1359318497) connection to localhost/127.0.0.1:42681 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42681 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=811 (was 807) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1269 (was 1354), ProcessCount=17 (was 17), AvailableMemoryMB=2581 (was 3077) 2024-12-09T11:01:33,265 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-12-09T11:01:33,293 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=802, OpenFileDescriptor=811, MaxFileDescriptor=1048576, SystemLoadAverage=1269, ProcessCount=17, AvailableMemoryMB=2579 2024-12-09T11:01:33,293 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-12-09T11:01:33,295 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:01:33,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=135, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:33,298 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:01:33,299 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 135 2024-12-09T11:01:33,301 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:01:33,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=135 2024-12-09T11:01:33,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742181_1357 (size=458) 2024-12-09T11:01:33,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742181_1357 (size=458) 2024-12-09T11:01:33,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742181_1357 (size=458) 2024-12-09T11:01:33,338 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0413b0a34c22bfd368e5306d4a33b41e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA 
=> {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:01:33,342 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => bd3e01337768bb7e558b2def558c3c3a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:01:33,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742182_1358 (size=83) 2024-12-09T11:01:33,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742182_1358 (size=83) 2024-12-09T11:01:33,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742182_1358 (size=83) 2024-12-09T11:01:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=135 2024-12-09T11:01:33,417 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:01:33,418 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 0413b0a34c22bfd368e5306d4a33b41e, disabling compactions & flushes 2024-12-09T11:01:33,418 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 2024-12-09T11:01:33,418 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 2024-12-09T11:01:33,418 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 
after waiting 0 ms 2024-12-09T11:01:33,418 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 2024-12-09T11:01:33,418 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 2024-12-09T11:01:33,418 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0413b0a34c22bfd368e5306d4a33b41e: Waiting for close lock at 1733742093418Disabling compacts and flushes for region at 1733742093418Disabling writes for close at 1733742093418Writing region close event to WAL at 1733742093418Closed at 1733742093418 2024-12-09T11:01:33,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742183_1359 (size=83) 2024-12-09T11:01:33,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742183_1359 (size=83) 2024-12-09T11:01:33,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742183_1359 (size=83) 2024-12-09T11:01:33,462 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:01:33,462 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing bd3e01337768bb7e558b2def558c3c3a, disabling compactions & flushes 2024-12-09T11:01:33,462 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 2024-12-09T11:01:33,462 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 2024-12-09T11:01:33,462 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. after waiting 0 ms 2024-12-09T11:01:33,462 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 2024-12-09T11:01:33,462 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 
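The surrounding CreateTableProcedure entries create testtb-testExportFileSystemStateWithMergeRegion with a single MOB-enabled column family (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', BLOOMFILTER => 'ROW') and a split at row key '1', giving two regions. A roughly equivalent descriptor can be built with the HBase 2.x client API; the sketch below is an illustration under those assumptions, not the test's own setup code, and it uses a placeholder table name.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-mob-example"); // placeholder name
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)           // IS_MOB => 'true'
        .setMobThreshold(0L)           // MOB_THRESHOLD => '0': every cell is written as a MOB
        .setMaxVersions(1)             // VERSIONS => '1'
        .setBloomFilterType(BloomType.ROW)
        .build();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(table)
        .setColumnFamily(cf)
        .build();
    byte[][] splitKeys = { Bytes.toBytes("1") }; // two regions: ['', '1') and ['1', '')
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a CreateTableProcedure: write the FS layout, add the regions
      // to hbase:meta, then assign them, as in the surrounding log entries.
      admin.createTable(td, splitKeys);
    }
  }
}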
2024-12-09T11:01:33,462 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for bd3e01337768bb7e558b2def558c3c3a: Waiting for close lock at 1733742093462Disabling compacts and flushes for region at 1733742093462Disabling writes for close at 1733742093462Writing region close event to WAL at 1733742093462Closed at 1733742093462 2024-12-09T11:01:33,463 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:01:33,464 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733742093463"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742093463"}]},"ts":"1733742093463"} 2024-12-09T11:01:33,464 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733742093463"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742093463"}]},"ts":"1733742093463"} 2024-12-09T11:01:33,467 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T11:01:33,474 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:01:33,474 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742093474"}]},"ts":"1733742093474"} 2024-12-09T11:01:33,476 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-09T11:01:33,476 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T11:01:33,478 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:01:33,478 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T11:01:33,478 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T11:01:33,478 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:01:33,478 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:01:33,478 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T11:01:33,478 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T11:01:33,478 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:01:33,478 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T11:01:33,478 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T11:01:33,479 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0413b0a34c22bfd368e5306d4a33b41e, ASSIGN}, {pid=137, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bd3e01337768bb7e558b2def558c3c3a, ASSIGN}] 2024-12-09T11:01:33,481 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0413b0a34c22bfd368e5306d4a33b41e, ASSIGN 2024-12-09T11:01:33,482 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0413b0a34c22bfd368e5306d4a33b41e, ASSIGN; state=OFFLINE, location=3469f9ca0af3,39691,1733741766880; forceNewPlan=false, retain=false 2024-12-09T11:01:33,485 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bd3e01337768bb7e558b2def558c3c3a, ASSIGN 2024-12-09T11:01:33,486 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=137, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bd3e01337768bb7e558b2def558c3c3a, ASSIGN; state=OFFLINE, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T11:01:33,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=135 2024-12-09T11:01:33,633 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-09T11:01:33,633 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=137 updating hbase:meta row=bd3e01337768bb7e558b2def558c3c3a, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:01:33,633 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=136 updating hbase:meta row=0413b0a34c22bfd368e5306d4a33b41e, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:01:33,636 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=137, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bd3e01337768bb7e558b2def558c3c3a, ASSIGN because future has completed 2024-12-09T11:01:33,637 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; OpenRegionProcedure bd3e01337768bb7e558b2def558c3c3a, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T11:01:33,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0413b0a34c22bfd368e5306d4a33b41e, ASSIGN because future has completed 2024-12-09T11:01:33,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=139, ppid=136, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:01:33,794 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 2024-12-09T11:01:33,794 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(7752): Opening region: {ENCODED => bd3e01337768bb7e558b2def558c3c3a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T11:01:33,794 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 2024-12-09T11:01:33,795 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. service=AccessControlService 2024-12-09T11:01:33,795 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7752): Opening region: {ENCODED => 0413b0a34c22bfd368e5306d4a33b41e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T11:01:33,795 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
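[Annotation, not part of the captured log] At this point the master has created OpenRegionProcedure pid=138/139 and the regions are moving to OPENING on their assigned servers. A caller that just issued the createTable request would normally block until the table is fully online; a minimal hedged sketch (class and method names are illustrative, an open Connection is assumed) of that wait:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

// Hedged sketch: poll until every region of the new table is assigned and open.
public final class WaitForTableOnline {
  static void await(Connection conn, TableName table, long timeoutMs) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (!admin.isTableAvailable(table)) {
        if (System.currentTimeMillis() > deadline) {
          throw new IllegalStateException("Table not available in time: " + table);
        }
        Thread.sleep(100); // the test harness itself waits up to 60,000 ms
      }
    }
  }
}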
2024-12-09T11:01:33,795 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. service=AccessControlService 2024-12-09T11:01:33,795 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:33,795 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:01:33,795 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T11:01:33,795 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(7794): checking encryption for bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:33,795 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(7797): checking classloading for bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:33,795 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:33,795 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:01:33,795 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7794): checking encryption for 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:33,795 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7797): checking classloading for 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:33,797 INFO [StoreOpener-0413b0a34c22bfd368e5306d4a33b41e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:33,798 INFO [StoreOpener-0413b0a34c22bfd368e5306d4a33b41e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0413b0a34c22bfd368e5306d4a33b41e columnFamilyName cf 2024-12-09T11:01:33,799 INFO [StoreOpener-bd3e01337768bb7e558b2def558c3c3a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:33,799 DEBUG [StoreOpener-0413b0a34c22bfd368e5306d4a33b41e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:33,800 INFO [StoreOpener-0413b0a34c22bfd368e5306d4a33b41e-1 {}] regionserver.HStore(327): Store=0413b0a34c22bfd368e5306d4a33b41e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:01:33,800 INFO [StoreOpener-bd3e01337768bb7e558b2def558c3c3a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bd3e01337768bb7e558b2def558c3c3a columnFamilyName cf 2024-12-09T11:01:33,800 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1038): replaying wal for 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:33,801 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:33,801 DEBUG [StoreOpener-bd3e01337768bb7e558b2def558c3c3a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:33,802 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:33,802 INFO [StoreOpener-bd3e01337768bb7e558b2def558c3c3a-1 {}] regionserver.HStore(327): Store=bd3e01337768bb7e558b2def558c3c3a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:01:33,802 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 
{event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1048): stopping wal replay for 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:33,802 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1060): Cleaning up temporary data for 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:33,802 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(1038): replaying wal for bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:33,803 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:33,803 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:33,804 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(1048): stopping wal replay for bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:33,804 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(1060): Cleaning up temporary data for bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:33,804 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1093): writing seq id for 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:33,806 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(1093): writing seq id for bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:33,807 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:01:33,807 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1114): Opened 0413b0a34c22bfd368e5306d4a33b41e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69537550, jitterRate=0.03619024157524109}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:01:33,807 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:33,808 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1006): Region open journal for 0413b0a34c22bfd368e5306d4a33b41e: Running coprocessor pre-open hook at 1733742093795Writing region info on filesystem at 1733742093795Initializing all the Stores at 1733742093796 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742093796Cleaning up temporary data from old regions at 1733742093802 (+6 ms)Running coprocessor post-open hooks at 1733742093807 (+5 ms)Region opened successfully at 1733742093808 (+1 ms) 2024-12-09T11:01:33,809 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:01:33,809 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e., pid=139, masterSystemTime=1733742093792 2024-12-09T11:01:33,809 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(1114): Opened bd3e01337768bb7e558b2def558c3c3a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66896253, jitterRate=-0.003168150782585144}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:01:33,809 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:33,809 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegion(1006): Region open journal for bd3e01337768bb7e558b2def558c3c3a: Running coprocessor pre-open hook at 1733742093795Writing region info on filesystem at 1733742093795Initializing all the Stores at 1733742093798 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742093798Cleaning up temporary data from old regions at 1733742093804 (+6 ms)Running coprocessor post-open hooks at 1733742093809 (+5 ms)Region opened successfully at 1733742093809 2024-12-09T11:01:33,811 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a., pid=138, masterSystemTime=1733742093789 2024-12-09T11:01:33,812 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 
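[Annotation, not part of the captured log] The open journal above also records the split policy the regions came up with (SteppingSplitPolicy over IncreasingToUpperBoundRegionSplitPolicy/ConstantSizeRegionSplitPolicy with a jittered desiredMaxFileSize). If these were to be pinned explicitly rather than left to defaults, one hedged option is to set them on the table descriptor; the 64 MB figure below is illustrative only and an open Admin handle is assumed.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Hedged sketch: pin max file size and split policy on an existing table.
public final class PinSplitPolicy {
  static void apply(Admin admin, TableName table) throws Exception {
    TableDescriptor current = admin.getDescriptor(table);
    TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
        .setMaxFileSize(64L * 1024 * 1024) // example value only
        .setRegionSplitPolicyClassName(
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy")
        .build();
    admin.modifyTable(updated);
  }
}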
2024-12-09T11:01:33,812 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 2024-12-09T11:01:33,813 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=136 updating hbase:meta row=0413b0a34c22bfd368e5306d4a33b41e, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:01:33,813 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 2024-12-09T11:01:33,813 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=138}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 2024-12-09T11:01:33,813 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=137 updating hbase:meta row=bd3e01337768bb7e558b2def558c3c3a, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:01:33,815 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=139, ppid=136, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:01:33,816 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=138, ppid=137, state=RUNNABLE, hasLock=false; OpenRegionProcedure bd3e01337768bb7e558b2def558c3c3a, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T11:01:33,819 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=139, resume processing ppid=136 2024-12-09T11:01:33,819 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=136, state=SUCCESS, hasLock=false; OpenRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e, server=3469f9ca0af3,39691,1733741766880 in 179 msec 2024-12-09T11:01:33,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=135, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0413b0a34c22bfd368e5306d4a33b41e, ASSIGN in 340 msec 2024-12-09T11:01:33,820 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=138, resume processing ppid=137 2024-12-09T11:01:33,821 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; OpenRegionProcedure bd3e01337768bb7e558b2def558c3c3a, server=3469f9ca0af3,33293,1733741767044 in 181 msec 2024-12-09T11:01:33,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=137, resume processing ppid=135 2024-12-09T11:01:33,823 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, ppid=135, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bd3e01337768bb7e558b2def558c3c3a, ASSIGN in 342 msec 2024-12-09T11:01:33,823 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:01:33,824 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742093823"}]},"ts":"1733742093823"} 2024-12-09T11:01:33,825 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-09T11:01:33,826 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:01:33,827 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-09T11:01:33,831 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-09T11:01:33,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:01:33,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:01:33,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:01:33,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:01:33,837 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T11:01:33,837 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T11:01:33,837 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T11:01:33,837 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T11:01:33,840 INFO [PEWorker-2 
{}] procedure2.ProcedureExecutor(1521): Finished pid=135, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 542 msec 2024-12-09T11:01:33,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=135 2024-12-09T11:01:33,933 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-09T11:01:33,933 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T11:01:33,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] ipc.CallRunner(138): callId: 316 service: ClientService methodName: Scan size: 218 connection: 172.17.0.2:34982 deadline: 1733742153934, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=3469f9ca0af3 port=33293 startCode=1733741767044. As of locationSeqNum=119. 2024-12-09T11:01:33,936 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=3469f9ca0af3 port=33293 startCode=1733741767044. As of locationSeqNum=119. 2024-12-09T11:01:33,936 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=3469f9ca0af3 port=33293 startCode=1733741767044. As of locationSeqNum=119. 2024-12-09T11:01:33,936 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,39691,1733741766880, seqNum=-1 with the new location region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=119 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=3469f9ca0af3 port=33293 startCode=1733741767044. As of locationSeqNum=119. 2024-12-09T11:01:34,047 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56530, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:01:34,051 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:34,051 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 
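[Annotation, not part of the captured log] The "Found 2 regions for table" check above is driven by meta scans from HBaseTestingUtil. Outside the test harness the same information is available through RegionLocator; a hedged sketch (illustrative names, open Connection assumed):

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: list the regions of the new table and the servers they landed on.
public final class ListTableRegions {
  static void dump(Connection conn, TableName table) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        RegionInfo ri = loc.getRegion();
        System.out.printf("region=%s start=%s end=%s on %s%n",
            ri.getEncodedName(),
            Bytes.toStringBinary(ri.getStartKey()),
            Bytes.toStringBinary(ri.getEndKey()),
            loc.getServerName());
      }
    }
  }
}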
2024-12-09T11:01:34,051 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:01:34,056 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T11:01:34,065 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T11:01:34,073 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T11:01:34,079 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-09T11:01:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742094079 (current time:1733742094079). 2024-12-09T11:01:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:01:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-09T11:01:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:01:34,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e366a87, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:34,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:01:34,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:01:34,086 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:01:34,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:01:34,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:01:34,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e5bd5, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:34,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:01:34,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:01:34,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:34,089 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40286, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:01:34,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bf99f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:34,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:01:34,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:01:34,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:01:34,093 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56540, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:01:34,103 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T11:01:34,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:01:34,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:34,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:34,108 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:01:34,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24fbe94f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:34,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:01:34,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:01:34,111 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:01:34,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:01:34,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:01:34,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7355b322, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:34,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:01:34,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:01:34,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:34,113 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40304, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:01:34,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17edd6c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:34,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:01:34,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:01:34,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:01:34,118 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56550, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:01:34,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:01:34,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:01:34,123 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52968, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:01:34,125 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
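[Annotation, not part of the captured log] The lookup of 'hbase:acl' above is the master re-reading the table permissions ("jenkins: RWXCA") that PermissionStorage wrote during CREATE_TABLE_POST_OPERATION, so it can attach them to the snapshot description. A hedged sketch of granting and reading back such a permission from a client (illustrative names, open Connection assumed):

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;

// Hedged sketch: grant table-level RWXCA to a user, then read it back from hbase:acl.
public final class GrantTablePermission {
  static void grantAndCheck(Connection conn, TableName table, String user) throws Throwable {
    AccessControlClient.grant(conn, table, user, null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
    List<UserPermission> perms =
        AccessControlClient.getUserPermissions(conn, table.getNameAsString());
    perms.forEach(System.out::println); // expect an entry like "jenkins: RWXCA"
  }
}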
2024-12-09T11:01:34,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:01:34,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:34,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:34,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-09T11:01:34,126 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:01:34,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
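[Annotation, not part of the captured log] The snapshot request logged at 11:01:34,079 ({ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion ... type=FLUSH ttl=0 }) has now passed validation and SnapshotManager is about to store SnapshotProcedure pid=140. From the client side this whole sequence is a single blocking Admin call; a hedged sketch (class name is illustrative, open Connection assumed):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotType;

// Hedged sketch: request the FLUSH snapshot taken in this test; Admin.snapshot
// returns once the master reports the snapshot procedure as done.
public final class TakeEmptySnapshot {
  static void take(Connection conn) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Admin admin = conn.getAdmin()) {
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
          table, SnapshotType.FLUSH);
    }
  }
}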
2024-12-09T11:01:34,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-09T11:01:34,130 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:01:34,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-09T11:01:34,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-09T11:01:34,132 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:01:34,135 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:01:34,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742184_1360 (size=215) 2024-12-09T11:01:34,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742184_1360 (size=215) 2024-12-09T11:01:34,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742184_1360 (size=215) 2024-12-09T11:01:34,166 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:01:34,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd3e01337768bb7e558b2def558c3c3a}] 2024-12-09T11:01:34,168 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:34,168 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:34,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-09T11:01:34,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-09T11:01:34,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 2024-12-09T11:01:34,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for bd3e01337768bb7e558b2def558c3c3a: 2024-12-09T11:01:34,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-09T11:01:34,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:34,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:01:34,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T11:01:34,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-09T11:01:34,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 2024-12-09T11:01:34,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 0413b0a34c22bfd368e5306d4a33b41e: 2024-12-09T11:01:34,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-09T11:01:34,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:34,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:01:34,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T11:01:34,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742185_1361 (size=86) 2024-12-09T11:01:34,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742185_1361 (size=86) 2024-12-09T11:01:34,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742185_1361 (size=86) 2024-12-09T11:01:34,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 2024-12-09T11:01:34,350 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-09T11:01:34,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-09T11:01:34,351 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:34,351 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:34,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bd3e01337768bb7e558b2def558c3c3a in 186 msec 2024-12-09T11:01:34,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742186_1362 (size=86) 2024-12-09T11:01:34,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742186_1362 (size=86) 2024-12-09T11:01:34,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742186_1362 (size=86) 2024-12-09T11:01:34,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 
2024-12-09T11:01:34,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-09T11:01:34,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-09T11:01:34,386 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:34,387 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:34,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=141, resume processing ppid=140 2024-12-09T11:01:34,393 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:01:34,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e in 225 msec 2024-12-09T11:01:34,394 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:01:34,403 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T11:01:34,403 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:01:34,403 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:34,403 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-09T11:01:34,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742187_1363 (size=78) 2024-12-09T11:01:34,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742187_1363 (size=78) 2024-12-09T11:01:34,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742187_1363 (size=78) 2024-12-09T11:01:34,442 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:01:34,443 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:34,446 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:34,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-09T11:01:34,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742188_1364 (size=713) 2024-12-09T11:01:34,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742188_1364 (size=713) 2024-12-09T11:01:34,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742188_1364 (size=713) 2024-12-09T11:01:34,500 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:01:34,502 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-09T11:01:34,506 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:01:34,507 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:34,510 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:01:34,510 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-09T11:01:34,513 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 383 msec 2024-12-09T11:01:34,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-09T11:01:34,763 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-09T11:01:34,779 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39691 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:01:34,783 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:01:34,788 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T11:01:34,793 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:34,793 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 
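[Editor's note, not part of the log] The HRegion(8528) warnings just above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are what the region server typically emits when a client mutation skips the write-ahead log. A minimal client-side sketch that would produce such writes is shown below; the table name and the cf:q family/qualifier are taken from the log, while the row key and value are placeholder example data and the default client configuration is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
      Put put = new Put(Bytes.toBytes("row-0"))               // placeholder row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), // family/qualifier as seen in the log
                     Bytes.toBytes("value-0"))                // placeholder value
          .setDurability(Durability.SKIP_WAL);                // skip the WAL -> HRegion logs the warning above
      table.put(put);
    }
  }
}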
2024-12-09T11:01:34,793 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:01:34,796 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T11:01:34,805 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T11:01:34,813 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T11:01:34,817 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-09T11:01:34,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742094818 (current time:1733742094818). 2024-12-09T11:01:34,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:01:34,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-09T11:01:34,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:01:34,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@399f9598, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:34,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:01:34,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:01:34,827 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:01:34,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:01:34,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:01:34,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61ef9c46, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:34,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:01:34,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:01:34,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:34,830 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40322, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:01:34,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a7ee38d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:34,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:01:34,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:01:34,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:01:34,833 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56554, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:01:34,836 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T11:01:34,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:01:34,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:34,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:34,837 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:01:34,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@540e65e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:34,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:01:34,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:01:34,839 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:01:34,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:01:34,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:01:34,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@491044b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:34,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:01:34,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:01:34,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:34,842 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40336, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:01:34,843 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@360aa12d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:34,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:01:34,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:01:34,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:01:34,846 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56564, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:01:34,849 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:01:34,849 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:01:34,851 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52974, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:01:34,852 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T11:01:34,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:01:34,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:34,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:34,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-09T11:01:34,853 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:01:34,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
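[Editor's note, not part of the log] The sequence above (MasterRpcServices(1763) logging the snapshot request, the ACL read, then "No existing snapshot, attempting snapshot...") is the master-side view of an ordinary client snapshot call. A minimal sketch of that client call follows; the snapshot and table names come from the log, and a default client configuration is assumed. Admin.snapshot on a table name issues a FLUSH-type snapshot (matching "type=FLUSH" in the logged request) and blocks, polling the master until the SnapshotProcedure finishes, which is what the repeated "Checking to see if procedure is done" lines correspond to.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Take a flush snapshot of the test table; the call returns only after
      // the master reports the snapshot procedure as done.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}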
2024-12-09T11:01:34,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=143, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-09T11:01:34,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 143 2024-12-09T11:01:34,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-09T11:01:34,857 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=143, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=143, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:01:34,860 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=143, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=143, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:01:34,864 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=143, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=143, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:01:34,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742189_1365 (size=210) 2024-12-09T11:01:34,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742189_1365 (size=210) 2024-12-09T11:01:34,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742189_1365 (size=210) 2024-12-09T11:01:34,874 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=143, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=143, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:01:34,874 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e}, {pid=145, ppid=143, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd3e01337768bb7e558b2def558c3c3a}] 2024-12-09T11:01:34,875 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:34,875 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:34,882 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=3469f9ca0af3,39691,1733741766880, table=testtb-testExportFileSystemStateWithMergeRegion, region=0413b0a34c22bfd368e5306d4a33b41e. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-09T11:01:34,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-09T11:01:35,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=145 2024-12-09T11:01:35,028 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 2024-12-09T11:01:35,028 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2902): Flushing bd3e01337768bb7e558b2def558c3c3a 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-12-09T11:01:35,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=144 2024-12-09T11:01:35,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 
2024-12-09T11:01:35,034 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] regionserver.HRegion(2902): Flushing 0413b0a34c22bfd368e5306d4a33b41e 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-12-09T11:01:35,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241209ed09fd8db6dd4d68bb0f5e3175feeb48_bd3e01337768bb7e558b2def558c3c3a is 71, key is 1c724bb7f9d65957dd05e374589be9c9/cf:q/1733742094782/Put/seqid=0 2024-12-09T11:01:35,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209fe0c8b23c89346088189699fbb92d41b_0413b0a34c22bfd368e5306d4a33b41e is 71, key is 028873c077913cbc6a9a6c8c9046225c/cf:q/1733742094779/Put/seqid=0 2024-12-09T11:01:35,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742190_1366 (size=5382) 2024-12-09T11:01:35,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742190_1366 (size=5382) 2024-12-09T11:01:35,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:35,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742190_1366 (size=5382) 2024-12-09T11:01:35,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742191_1367 (size=7892) 2024-12-09T11:01:35,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742191_1367 (size=7892) 2024-12-09T11:01:35,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742191_1367 (size=7892) 2024-12-09T11:01:35,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:35,105 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209fe0c8b23c89346088189699fbb92d41b_0413b0a34c22bfd368e5306d4a33b41e to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241209fe0c8b23c89346088189699fbb92d41b_0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:35,106 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241209ed09fd8db6dd4d68bb0f5e3175feeb48_bd3e01337768bb7e558b2def558c3c3a to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241209ed09fd8db6dd4d68bb0f5e3175feeb48_bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:35,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/.tmp/cf/7ea5edac991c4103836a4ef8cd8c39a4, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=0413b0a34c22bfd368e5306d4a33b41e] 2024-12-09T11:01:35,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/.tmp/cf/7ea5edac991c4103836a4ef8cd8c39a4 is 224, key is 0d61b7dd238ee7b4a792cc1472daebec6/cf:q/1733742094779/Put/seqid=0 2024-12-09T11:01:35,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/.tmp/cf/9ac2d5cc9a324e34b4d48d84bdadcab6, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=bd3e01337768bb7e558b2def558c3c3a] 2024-12-09T11:01:35,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/.tmp/cf/9ac2d5cc9a324e34b4d48d84bdadcab6 is 224, key is 119cac051744620a8c7a9a8321c246d2f/cf:q/1733742094782/Put/seqid=0 2024-12-09T11:01:35,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742192_1368 (size=14839) 2024-12-09T11:01:35,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742192_1368 (size=14839) 2024-12-09T11:01:35,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742192_1368 (size=14839) 2024-12-09T11:01:35,122 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.8 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/.tmp/cf/9ac2d5cc9a324e34b4d48d84bdadcab6 2024-12-09T11:01:35,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742193_1369 (size=6866) 2024-12-09T11:01:35,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742193_1369 (size=6866) 2024-12-09T11:01:35,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742193_1369 (size=6866) 2024-12-09T11:01:35,130 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=467, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/.tmp/cf/7ea5edac991c4103836a4ef8cd8c39a4 2024-12-09T11:01:35,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/.tmp/cf/7ea5edac991c4103836a4ef8cd8c39a4 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/cf/7ea5edac991c4103836a4ef8cd8c39a4 2024-12-09T11:01:35,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/cf/7ea5edac991c4103836a4ef8cd8c39a4, entries=7, sequenceid=6, filesize=6.7 K 2024-12-09T11:01:35,145 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for 0413b0a34c22bfd368e5306d4a33b41e in 111ms, sequenceid=6, compaction requested=false 2024-12-09T11:01:35,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-09T11:01:35,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] regionserver.HRegion(2603): Flush status journal for 0413b0a34c22bfd368e5306d4a33b41e: 2024-12-09T11:01:35,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 
2024-12-09T11:01:35,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:35,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:01:35,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/cf/7ea5edac991c4103836a4ef8cd8c39a4] hfiles 2024-12-09T11:01:35,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/cf/7ea5edac991c4103836a4ef8cd8c39a4 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:35,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/.tmp/cf/9ac2d5cc9a324e34b4d48d84bdadcab6 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/cf/9ac2d5cc9a324e34b4d48d84bdadcab6 2024-12-09T11:01:35,153 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/cf/9ac2d5cc9a324e34b4d48d84bdadcab6, entries=43, sequenceid=6, filesize=14.5 K 2024-12-09T11:01:35,162 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for bd3e01337768bb7e558b2def558c3c3a in 134ms, sequenceid=6, compaction requested=false 2024-12-09T11:01:35,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2603): Flush status journal for bd3e01337768bb7e558b2def558c3c3a: 2024-12-09T11:01:35,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 
2024-12-09T11:01:35,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:35,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:01:35,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/cf/9ac2d5cc9a324e34b4d48d84bdadcab6] hfiles 2024-12-09T11:01:35,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/cf/9ac2d5cc9a324e34b4d48d84bdadcab6 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:35,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742194_1370 (size=125) 2024-12-09T11:01:35,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742194_1370 (size=125) 2024-12-09T11:01:35,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742194_1370 (size=125) 2024-12-09T11:01:35,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 
2024-12-09T11:01:35,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=144 2024-12-09T11:01:35,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=144 2024-12-09T11:01:35,171 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:35,171 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:35,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-09T11:01:35,174 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e in 298 msec 2024-12-09T11:01:35,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742195_1371 (size=125) 2024-12-09T11:01:35,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742195_1371 (size=125) 2024-12-09T11:01:35,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742195_1371 (size=125) 2024-12-09T11:01:35,197 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 
2024-12-09T11:01:35,197 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-09T11:01:35,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=145 2024-12-09T11:01:35,197 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:35,198 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:35,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-12-09T11:01:35,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bd3e01337768bb7e558b2def558c3c3a in 325 msec 2024-12-09T11:01:35,201 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=143, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=143, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:01:35,203 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=143, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=143, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:01:35,204 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T11:01:35,204 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:01:35,204 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:35,206 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241209ed09fd8db6dd4d68bb0f5e3175feeb48_bd3e01337768bb7e558b2def558c3c3a, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241209fe0c8b23c89346088189699fbb92d41b_0413b0a34c22bfd368e5306d4a33b41e] hfiles 2024-12-09T11:01:35,206 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241209ed09fd8db6dd4d68bb0f5e3175feeb48_bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:01:35,206 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241209fe0c8b23c89346088189699fbb92d41b_0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:01:35,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742196_1372 (size=309) 2024-12-09T11:01:35,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742196_1372 (size=309) 2024-12-09T11:01:35,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742196_1372 (size=309) 2024-12-09T11:01:35,215 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=143, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=143, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:01:35,215 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:35,216 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:35,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742197_1373 (size=1023) 2024-12-09T11:01:35,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34611 is added to blk_1073742197_1373 (size=1023) 2024-12-09T11:01:35,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742197_1373 (size=1023) 2024-12-09T11:01:35,253 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=143, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=143, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:01:35,270 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=143, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=143, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:01:35,271 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:35,287 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=143, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=143, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:01:35,287 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 143 2024-12-09T11:01:35,292 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=143, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 434 msec 2024-12-09T11:01:35,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-09T11:01:35,483 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-09T11:01:35,485 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:01:35,486 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:01:35,489 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:01:35,490 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56576, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:01:35,490 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55204, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:01:35,495 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52984, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:01:35,498 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:01:35,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=146, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:01:35,503 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=146, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:01:35,504 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:35,504 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 146 2024-12-09T11:01:35,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=146 2024-12-09T11:01:35,505 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=146, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:01:35,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742198_1374 (size=399) 2024-12-09T11:01:35,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742198_1374 (size=399) 2024-12-09T11:01:35,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742198_1374 (size=399) 2024-12-09T11:01:35,531 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0ca641e4aaa755d9220f84ac1f07ec06, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:01:35,538 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => a143a26c1939872db052f56b3aa41d8d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:01:35,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742199_1375 (size=85) 2024-12-09T11:01:35,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742199_1375 (size=85) 2024-12-09T11:01:35,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742199_1375 (size=85) 2024-12-09T11:01:35,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742200_1376 (size=85) 2024-12-09T11:01:35,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742200_1376 (size=85) 2024-12-09T11:01:35,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742200_1376 (size=85) 2024-12-09T11:01:35,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=146 2024-12-09T11:01:35,617 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:01:35,617 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing a143a26c1939872db052f56b3aa41d8d, disabling compactions & flushes 2024-12-09T11:01:35,617 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. 2024-12-09T11:01:35,617 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. 
2024-12-09T11:01:35,617 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. after waiting 0 ms 2024-12-09T11:01:35,617 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. 2024-12-09T11:01:35,617 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. 2024-12-09T11:01:35,617 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for a143a26c1939872db052f56b3aa41d8d: Waiting for close lock at 1733742095617Disabling compacts and flushes for region at 1733742095617Disabling writes for close at 1733742095617Writing region close event to WAL at 1733742095617Closed at 1733742095617 2024-12-09T11:01:35,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=146 2024-12-09T11:01:35,993 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:01:35,993 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 0ca641e4aaa755d9220f84ac1f07ec06, disabling compactions & flushes 2024-12-09T11:01:35,993 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. 2024-12-09T11:01:35,993 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. 2024-12-09T11:01:35,993 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. after waiting 0 ms 2024-12-09T11:01:35,993 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. 2024-12-09T11:01:35,993 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. 
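Annotation: the trace above covers two client operations — the FLUSH snapshot snaptb0-testExportFileSystemStateWithMergeRegion finishing (pid=143) and the creation of testtb-testExportFileSystemStateWithMergeRegion-1 pre-split at row key '2', which is why the CreateTableProcedure materialises exactly two regions ('' -> '2' and '2' -> '') and closes them again once the filesystem layout is written. A minimal client-side sketch of the corresponding Admin calls follows; this is not the test's own code, the connection boilerplate is assumed, and only the names and the single-version 'cf' family are taken from the logged descriptor.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SnapshotThenCreateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush-type snapshot of the source table (names taken from the log above).
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));

      // Pre-split target table: a single split key '2' yields the two regions
      // ('' -> '2') and ('2' -> '') that the CreateTableProcedure initialises above.
      TableName target =
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
      admin.createTable(
          TableDescriptorBuilder.newBuilder(target)
              .setColumnFamily(
                  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                      .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                      .build())
              .build(),
          new byte[][] { Bytes.toBytes("2") });
    }
  }
}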
2024-12-09T11:01:35,993 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0ca641e4aaa755d9220f84ac1f07ec06: Waiting for close lock at 1733742095993Disabling compacts and flushes for region at 1733742095993Disabling writes for close at 1733742095993Writing region close event to WAL at 1733742095993Closed at 1733742095993 2024-12-09T11:01:35,996 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=146, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:01:35,996 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733742095996"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742095996"}]},"ts":"1733742095996"} 2024-12-09T11:01:35,997 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733742095996"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742095996"}]},"ts":"1733742095996"} 2024-12-09T11:01:36,000 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T11:01:36,003 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=146, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:01:36,003 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742096003"}]},"ts":"1733742096003"} 2024-12-09T11:01:36,006 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-09T11:01:36,007 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T11:01:36,009 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:01:36,009 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T11:01:36,009 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T11:01:36,009 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:01:36,009 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:01:36,009 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T11:01:36,009 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T11:01:36,009 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:01:36,009 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T11:01:36,009 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T11:01:36,010 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0ca641e4aaa755d9220f84ac1f07ec06, ASSIGN}, {pid=148, ppid=146, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a143a26c1939872db052f56b3aa41d8d, ASSIGN}] 2024-12-09T11:01:36,014 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=146, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a143a26c1939872db052f56b3aa41d8d, ASSIGN 2024-12-09T11:01:36,015 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=147, ppid=146, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0ca641e4aaa755d9220f84ac1f07ec06, ASSIGN 2024-12-09T11:01:36,018 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=148, ppid=146, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a143a26c1939872db052f56b3aa41d8d, ASSIGN; state=OFFLINE, location=3469f9ca0af3,42349,1733741767108; forceNewPlan=false, retain=false 2024-12-09T11:01:36,018 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=147, ppid=146, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0ca641e4aaa755d9220f84ac1f07ec06, ASSIGN; state=OFFLINE, location=3469f9ca0af3,39691,1733741766880; forceNewPlan=false, retain=false 2024-12-09T11:01:36,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=146 2024-12-09T11:01:36,170 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
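Annotation: after CREATE_TABLE_ASSIGN_REGIONS the balancer places both regions and a TransitRegionStateProcedure (pid=147/148) drives each one to OPEN on its chosen server. The resulting placement can be read back from the client through a RegionLocator; a small sketch, assuming an open Connection named conn as in the earlier example:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class RegionLocationSketch {
  // Prints encoded region name -> server for every region of the table,
  // i.e. the assignments the master is computing in the log above.
  static void printAssignments(Connection conn) throws IOException {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}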
2024-12-09T11:01:36,171 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=148 updating hbase:meta row=a143a26c1939872db052f56b3aa41d8d, regionState=OPENING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T11:01:36,172 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=147 updating hbase:meta row=0ca641e4aaa755d9220f84ac1f07ec06, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:01:36,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=148, ppid=146, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a143a26c1939872db052f56b3aa41d8d, ASSIGN because future has completed 2024-12-09T11:01:36,186 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE, hasLock=false; OpenRegionProcedure a143a26c1939872db052f56b3aa41d8d, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T11:01:36,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=146, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0ca641e4aaa755d9220f84ac1f07ec06, ASSIGN because future has completed 2024-12-09T11:01:36,192 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=150, ppid=147, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0ca641e4aaa755d9220f84ac1f07ec06, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:01:36,366 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. 2024-12-09T11:01:36,366 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(7752): Opening region: {ENCODED => a143a26c1939872db052f56b3aa41d8d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d.', STARTKEY => '2', ENDKEY => ''} 2024-12-09T11:01:36,366 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. service=AccessControlService 2024-12-09T11:01:36,366 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T11:01:36,367 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:36,367 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:01:36,367 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(7794): checking encryption for a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:36,367 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(7797): checking classloading for a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:36,378 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. 2024-12-09T11:01:36,378 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7752): Opening region: {ENCODED => 0ca641e4aaa755d9220f84ac1f07ec06, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06.', STARTKEY => '', ENDKEY => '2'} 2024-12-09T11:01:36,378 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. service=AccessControlService 2024-12-09T11:01:36,378 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
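Annotation: both region opens register the AccessController coprocessor, the same component that later writes the "jenkins: RWXCA" permission row and triggers the /hbase/acl ZooKeeper watchers further down. The mini-cluster test wires this up through its own test utility; outside a test, the coprocessor is normally enabled with the stock configuration keys shown in this hedged sketch (the property names are standard HBase settings, not taken from this test's setup):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class AccessControlConfSketch {
  // Returns a Configuration with authorization enabled and the AccessController
  // coprocessor loaded on the master, regions and regionservers.
  static Configuration withAccessControl() {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.set("hbase.security.authorization", "true");
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    return conf;
  }
}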
2024-12-09T11:01:36,379 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:36,379 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:01:36,379 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7794): checking encryption for 0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:36,379 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7797): checking classloading for 0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:36,382 INFO [StoreOpener-a143a26c1939872db052f56b3aa41d8d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:36,398 INFO [StoreOpener-a143a26c1939872db052f56b3aa41d8d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a143a26c1939872db052f56b3aa41d8d columnFamilyName cf 2024-12-09T11:01:36,398 DEBUG [StoreOpener-a143a26c1939872db052f56b3aa41d8d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:36,400 INFO [StoreOpener-a143a26c1939872db052f56b3aa41d8d-1 {}] regionserver.HStore(327): Store=a143a26c1939872db052f56b3aa41d8d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:01:36,406 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1038): replaying wal for a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:36,410 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:36,410 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:36,411 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1048): stopping wal replay for a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:36,411 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1060): Cleaning up temporary data for a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:36,414 INFO [StoreOpener-0ca641e4aaa755d9220f84ac1f07ec06-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:36,418 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1093): writing seq id for a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:36,421 INFO [StoreOpener-0ca641e4aaa755d9220f84ac1f07ec06-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0ca641e4aaa755d9220f84ac1f07ec06 columnFamilyName cf 2024-12-09T11:01:36,421 DEBUG [StoreOpener-0ca641e4aaa755d9220f84ac1f07ec06-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:36,421 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:01:36,422 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1114): Opened a143a26c1939872db052f56b3aa41d8d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66114332, jitterRate=-0.014819681644439697}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:01:36,422 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:36,423 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1006): Region open journal for a143a26c1939872db052f56b3aa41d8d: Running coprocessor pre-open hook at 1733742096367Writing region info on filesystem at 1733742096367Initializing all the Stores at 
1733742096374 (+7 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742096374Cleaning up temporary data from old regions at 1733742096411 (+37 ms)Running coprocessor post-open hooks at 1733742096422 (+11 ms)Region opened successfully at 1733742096423 (+1 ms) 2024-12-09T11:01:36,424 INFO [StoreOpener-0ca641e4aaa755d9220f84ac1f07ec06-1 {}] regionserver.HStore(327): Store=0ca641e4aaa755d9220f84ac1f07ec06/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:01:36,424 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1038): replaying wal for 0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:36,425 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:36,426 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:36,427 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1048): stopping wal replay for 0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:36,427 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1060): Cleaning up temporary data for 0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:36,429 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d., pid=149, masterSystemTime=1733742096350 2024-12-09T11:01:36,431 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1093): writing seq id for 0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:36,439 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. 2024-12-09T11:01:36,439 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. 
2024-12-09T11:01:36,440 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=148 updating hbase:meta row=a143a26c1939872db052f56b3aa41d8d, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T11:01:36,441 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:01:36,441 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1114): Opened 0ca641e4aaa755d9220f84ac1f07ec06; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61639410, jitterRate=-0.08150121569633484}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:01:36,441 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:36,441 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1006): Region open journal for 0ca641e4aaa755d9220f84ac1f07ec06: Running coprocessor pre-open hook at 1733742096382Writing region info on filesystem at 1733742096382Initializing all the Stores at 1733742096386 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742096386Cleaning up temporary data from old regions at 1733742096427 (+41 ms)Running coprocessor post-open hooks at 1733742096441 (+14 ms)Region opened successfully at 1733742096441 2024-12-09T11:01:36,447 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06., pid=150, masterSystemTime=1733742096368 2024-12-09T11:01:36,452 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. 2024-12-09T11:01:36,452 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. 
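Annotation: each region opens with SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy over a ConstantSizeRegionSplitPolicy base; the slightly different desiredMaxFileSize values come from the per-region jitterRate shown in the open journals. The split policy can also be pinned per table at creation time. A brief sketch follows; ConstantSizeRegionSplitPolicy is used purely as an example of a stock policy, and any RegionSplitPolicy subclass available on the server classpath would do.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class SplitPolicySketch {
  // Builds a descriptor that fixes the split policy for this table instead of
  // relying on the cluster-wide default seen in the open journal above.
  static TableDescriptor withExplicitSplitPolicy(TableName tn) {
    return TableDescriptorBuilder.newBuilder(tn)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
        .setRegionSplitPolicyClassName(
            "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
        .build();
  }
}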
2024-12-09T11:01:36,453 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=147 updating hbase:meta row=0ca641e4aaa755d9220f84ac1f07ec06, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:01:36,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE, hasLock=false; OpenRegionProcedure a143a26c1939872db052f56b3aa41d8d, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T11:01:36,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=147, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0ca641e4aaa755d9220f84ac1f07ec06, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:01:36,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:01:36,469 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-09T11:01:36,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:01:36,470 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-09T11:01:36,471 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-09T11:01:36,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=148 2024-12-09T11:01:36,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; OpenRegionProcedure a143a26c1939872db052f56b3aa41d8d, server=3469f9ca0af3,42349,1733741767108 in 275 msec 2024-12-09T11:01:36,474 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=150, resume processing ppid=147 2024-12-09T11:01:36,478 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=147, state=SUCCESS, hasLock=false; OpenRegionProcedure 0ca641e4aaa755d9220f84ac1f07ec06, server=3469f9ca0af3,39691,1733741766880 in 278 msec 2024-12-09T11:01:36,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, ppid=146, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a143a26c1939872db052f56b3aa41d8d, ASSIGN in 464 msec 2024-12-09T11:01:36,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=146 2024-12-09T11:01:36,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=146, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0ca641e4aaa755d9220f84ac1f07ec06, ASSIGN in 469 
msec 2024-12-09T11:01:36,494 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=146, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:01:36,495 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742096494"}]},"ts":"1733742096494"} 2024-12-09T11:01:36,498 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-09T11:01:36,500 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=146, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:01:36,500 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-09T11:01:36,518 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-09T11:01:36,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:01:36,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:01:36,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:01:36,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:01:36,531 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T11:01:36,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-09T11:01:36,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T11:01:36,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T11:01:36,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-09T11:01:36,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-09T11:01:36,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T11:01:36,533 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-09T11:01:36,536 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 1.0340 sec 2024-12-09T11:01:36,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=146 2024-12-09T11:01:36,653 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-09T11:01:36,658 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:01:36,672 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d., hostname=3469f9ca0af3,42349,1733741767108, seqNum=2] 2024-12-09T11:01:36,686 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-09T11:01:36,717 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [0ca641e4aaa755d9220f84ac1f07ec06, a143a26c1939872db052f56b3aa41d8d] 2024-12-09T11:01:36,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=151, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, 
hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[0ca641e4aaa755d9220f84ac1f07ec06, a143a26c1939872db052f56b3aa41d8d], force=true 2024-12-09T11:01:36,726 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=151, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[0ca641e4aaa755d9220f84ac1f07ec06, a143a26c1939872db052f56b3aa41d8d], force=true 2024-12-09T11:01:36,726 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=151, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[0ca641e4aaa755d9220f84ac1f07ec06, a143a26c1939872db052f56b3aa41d8d], force=true 2024-12-09T11:01:36,727 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=151, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[0ca641e4aaa755d9220f84ac1f07ec06, a143a26c1939872db052f56b3aa41d8d], force=true 2024-12-09T11:01:36,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=151 2024-12-09T11:01:36,746 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0ca641e4aaa755d9220f84ac1f07ec06, UNASSIGN}, {pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a143a26c1939872db052f56b3aa41d8d, UNASSIGN}] 2024-12-09T11:01:36,752 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a143a26c1939872db052f56b3aa41d8d, UNASSIGN 2024-12-09T11:01:36,753 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0ca641e4aaa755d9220f84ac1f07ec06, UNASSIGN 2024-12-09T11:01:36,758 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=a143a26c1939872db052f56b3aa41d8d, regionState=CLOSING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T11:01:36,759 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=0ca641e4aaa755d9220f84ac1f07ec06, regionState=CLOSING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:01:36,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a143a26c1939872db052f56b3aa41d8d, UNASSIGN because future has completed 2024-12-09T11:01:36,773 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 
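Annotation: between the CREATE completing (11:01:36,653) and the merge request (11:01:36,717) the client located rows '1' and '2' and wrote one cell into each region; those are the 24 B memstores flushed below with keys 1/cf: and 2/cf:. The merge itself is a single Admin call on the two encoded region names, stored as MergeTableRegionsProcedure pid=151 with force=true. A combined sketch of both steps — not the test's actual code; the empty qualifier is inferred from the flushed keys and the cell values are assumed:

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class PutThenMergeSketch {
  static final TableName TN =
      TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");

  // One cell per region: row '1' lands in ('' -> '2'), row '2' in ('2' -> '').
  static void writeOneCellPerRegion(Connection conn) throws IOException {
    try (Table table = conn.getTable(TN)) {
      table.put(new Put(Bytes.toBytes("1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("1")));
      table.put(new Put(Bytes.toBytes("2"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("2")));
    }
  }

  // Merge the two regions by encoded name; forcible=true mirrors force=true
  // in the logged procedure.
  static void mergeBothRegions(Admin admin)
      throws IOException, InterruptedException, ExecutionException {
    admin.mergeRegionsAsync(new byte[][] {
            Bytes.toBytes("0ca641e4aaa755d9220f84ac1f07ec06"),
            Bytes.toBytes("a143a26c1939872db052f56b3aa41d8d") },
        true).get();
  }
}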
2024-12-09T11:01:36,773 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure a143a26c1939872db052f56b3aa41d8d, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T11:01:36,774 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0ca641e4aaa755d9220f84ac1f07ec06, UNASSIGN because future has completed 2024-12-09T11:01:36,786 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-09T11:01:36,786 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0ca641e4aaa755d9220f84ac1f07ec06, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:01:36,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=151 2024-12-09T11:01:36,949 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(122): Close a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:36,949 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-09T11:01:36,950 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1722): Closing a143a26c1939872db052f56b3aa41d8d, disabling compactions & flushes 2024-12-09T11:01:36,950 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. 2024-12-09T11:01:36,950 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. 2024-12-09T11:01:36,950 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. after waiting 0 ms 2024-12-09T11:01:36,950 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. 
2024-12-09T11:01:36,950 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(2902): Flushing a143a26c1939872db052f56b3aa41d8d 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-09T11:01:36,978 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(122): Close 0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:36,978 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-09T11:01:36,979 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1722): Closing 0ca641e4aaa755d9220f84ac1f07ec06, disabling compactions & flushes 2024-12-09T11:01:36,979 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. 2024-12-09T11:01:36,979 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. 2024-12-09T11:01:36,979 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. after waiting 0 ms 2024-12-09T11:01:36,979 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. 
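Annotation: closing the regions for the merge forces each 24 B memstore to be flushed to an HFile; further down, the .tmp file is committed into the cf/ directory and an 8.seqid marker is written under recovered.edits/. The same memstore-to-HFile flush can be requested explicitly from a client, for example before snapshotting or decommissioning. A one-line sketch against the Admin API, offered only as the client-side counterpart of the close-time flushes here:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

final class FlushSketch {
  // Flushes all memstores of the table to HFiles.
  static void flushTable(Admin admin) throws IOException {
    admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
  }
}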
2024-12-09T11:01:36,979 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(2902): Flushing 0ca641e4aaa755d9220f84ac1f07ec06 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-09T11:01:37,001 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/.tmp/cf/e7750260d32e4355bb1844f2dc7b4be6 is 28, key is 2/cf:/1733742096674/Put/seqid=0 2024-12-09T11:01:37,046 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/.tmp/cf/6c03af4923b44125a47519e1efea8f22 is 28, key is 1/cf:/1733742096664/Put/seqid=0 2024-12-09T11:01:37,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=151 2024-12-09T11:01:37,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742201_1377 (size=4945) 2024-12-09T11:01:37,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742201_1377 (size=4945) 2024-12-09T11:01:37,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742201_1377 (size=4945) 2024-12-09T11:01:37,074 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/.tmp/cf/e7750260d32e4355bb1844f2dc7b4be6 2024-12-09T11:01:37,087 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0006_000001 (auth:SIMPLE) from 127.0.0.1:55374 2024-12-09T11:01:37,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742202_1378 (size=4945) 2024-12-09T11:01:37,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742202_1378 (size=4945) 2024-12-09T11:01:37,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742202_1378 (size=4945) 2024-12-09T11:01:37,101 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/.tmp/cf/6c03af4923b44125a47519e1efea8f22 2024-12-09T11:01:37,101 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/.tmp/cf/e7750260d32e4355bb1844f2dc7b4be6 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/cf/e7750260d32e4355bb1844f2dc7b4be6 2024-12-09T11:01:37,110 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/cf/e7750260d32e4355bb1844f2dc7b4be6, entries=1, sequenceid=5, filesize=4.8 K 2024-12-09T11:01:37,115 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for a143a26c1939872db052f56b3aa41d8d in 165ms, sequenceid=5, compaction requested=false 2024-12-09T11:01:37,116 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-09T11:01:37,117 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/.tmp/cf/6c03af4923b44125a47519e1efea8f22 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/cf/6c03af4923b44125a47519e1efea8f22 2024-12-09T11:01:37,125 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T11:01:37,126 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:01:37,126 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. 
2024-12-09T11:01:37,126 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1676): Region close journal for a143a26c1939872db052f56b3aa41d8d: Waiting for close lock at 1733742096950Running coprocessor pre-close hooks at 1733742096950Disabling compacts and flushes for region at 1733742096950Disabling writes for close at 1733742096950Obtaining lock to block concurrent updates at 1733742096950Preparing flush snapshotting stores in a143a26c1939872db052f56b3aa41d8d at 1733742096950Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733742096951 (+1 ms)Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d. at 1733742096954 (+3 ms)Flushing a143a26c1939872db052f56b3aa41d8d/cf: creating writer at 1733742096954Flushing a143a26c1939872db052f56b3aa41d8d/cf: appending metadata at 1733742097001 (+47 ms)Flushing a143a26c1939872db052f56b3aa41d8d/cf: closing flushed file at 1733742097001Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c012e81: reopening flushed file at 1733742097094 (+93 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for a143a26c1939872db052f56b3aa41d8d in 165ms, sequenceid=5, compaction requested=false at 1733742097115 (+21 ms)Writing region close event to WAL at 1733742097117 (+2 ms)Running coprocessor post-close hooks at 1733742097126 (+9 ms)Closed at 1733742097126 2024-12-09T11:01:37,139 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(157): Closed a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:01:37,140 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=a143a26c1939872db052f56b3aa41d8d, regionState=CLOSED 2024-12-09T11:01:37,143 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/cf/6c03af4923b44125a47519e1efea8f22, entries=1, sequenceid=5, filesize=4.8 K 2024-12-09T11:01:37,150 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 0ca641e4aaa755d9220f84ac1f07ec06 in 171ms, sequenceid=5, compaction requested=false 2024-12-09T11:01:37,157 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T11:01:37,158 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:01:37,158 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1973): Closed 
testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. 2024-12-09T11:01:37,158 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1676): Region close journal for 0ca641e4aaa755d9220f84ac1f07ec06: Waiting for close lock at 1733742096978Running coprocessor pre-close hooks at 1733742096978Disabling compacts and flushes for region at 1733742096978Disabling writes for close at 1733742096979 (+1 ms)Obtaining lock to block concurrent updates at 1733742096979Preparing flush snapshotting stores in 0ca641e4aaa755d9220f84ac1f07ec06 at 1733742096979Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733742096979Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06. at 1733742096984 (+5 ms)Flushing 0ca641e4aaa755d9220f84ac1f07ec06/cf: creating writer at 1733742096984Flushing 0ca641e4aaa755d9220f84ac1f07ec06/cf: appending metadata at 1733742097045 (+61 ms)Flushing 0ca641e4aaa755d9220f84ac1f07ec06/cf: closing flushed file at 1733742097045Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@300672f8: reopening flushed file at 1733742097111 (+66 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 0ca641e4aaa755d9220f84ac1f07ec06 in 171ms, sequenceid=5, compaction requested=false at 1733742097150 (+39 ms)Writing region close event to WAL at 1733742097152 (+2 ms)Running coprocessor post-close hooks at 1733742097158 (+6 ms)Closed at 1733742097158 2024-12-09T11:01:37,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure a143a26c1939872db052f56b3aa41d8d, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T11:01:37,164 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(157): Closed 0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:01:37,165 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=0ca641e4aaa755d9220f84ac1f07ec06, regionState=CLOSED 2024-12-09T11:01:37,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-09T11:01:37,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; CloseRegionProcedure a143a26c1939872db052f56b3aa41d8d, server=3469f9ca0af3,42349,1733741767108 in 390 msec 2024-12-09T11:01:37,169 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a143a26c1939872db052f56b3aa41d8d, UNASSIGN in 421 msec 2024-12-09T11:01:37,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0ca641e4aaa755d9220f84ac1f07ec06, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:01:37,178 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=155, resume 
processing ppid=152 2024-12-09T11:01:37,178 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, ppid=152, state=SUCCESS, hasLock=false; CloseRegionProcedure 0ca641e4aaa755d9220f84ac1f07ec06, server=3469f9ca0af3,39691,1733741766880 in 387 msec 2024-12-09T11:01:37,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=151 2024-12-09T11:01:37,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=0ca641e4aaa755d9220f84ac1f07ec06, UNASSIGN in 432 msec 2024-12-09T11:01:37,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742203_1379 (size=84) 2024-12-09T11:01:37,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742203_1379 (size=84) 2024-12-09T11:01:37,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742203_1379 (size=84) 2024-12-09T11:01:37,241 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:37,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742204_1380 (size=20) 2024-12-09T11:01:37,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742204_1380 (size=20) 2024-12-09T11:01:37,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742204_1380 (size=20) 2024-12-09T11:01:37,310 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:37,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=151 2024-12-09T11:01:37,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742205_1381 (size=21) 2024-12-09T11:01:37,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742205_1381 (size=21) 2024-12-09T11:01:37,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742205_1381 (size=21) 2024-12-09T11:01:37,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742206_1382 (size=84) 2024-12-09T11:01:37,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742206_1382 (size=84) 2024-12-09T11:01:37,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742206_1382 (size=84) 2024-12-09T11:01:37,447 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:37,493 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-09T11:01:37,498 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095497.0ca641e4aaa755d9220f84ac1f07ec06.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-09T11:01:37,498 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733742095497.a143a26c1939872db052f56b3aa41d8d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-09T11:01:37,498 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-09T11:01:37,513 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4642d1cde05785f48336193c9593c4ad, ASSIGN}] 2024-12-09T11:01:37,518 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4642d1cde05785f48336193c9593c4ad, ASSIGN 2024-12-09T11:01:37,522 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=156, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4642d1cde05785f48336193c9593c4ad, ASSIGN; state=MERGED, location=3469f9ca0af3,39691,1733741766880; forceNewPlan=false, retain=false 2024-12-09T11:01:37,674 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
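
The MergeTableRegionsProcedure (pid=151) driving these unassign, meta-rewrite and assign steps corresponds to a client-initiated merge of the two parent regions. A hedged sketch of how such a request is typically issued through the Admin API; the encoded region names are taken from the log, and force=true mirrors the "force=true" recorded for the procedure:

import java.util.concurrent.Future;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MergeRegionsExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Encoded region names of the two parent regions, as they appear in the log.
      byte[][] parents = new byte[][] {
          Bytes.toBytes("0ca641e4aaa755d9220f84ac1f07ec06"),
          Bytes.toBytes("a143a26c1939872db052f56b3aa41d8d")
      };
      // force=true allows merging regions that are not adjacent-by-default checks,
      // matching the forced merge recorded for pid=151.
      Future<Void> merge = admin.mergeRegionsAsync(parents, true);
      merge.get(); // wait for the master-side procedure to complete
    }
  }
}
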
2024-12-09T11:01:37,674 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=156 updating hbase:meta row=4642d1cde05785f48336193c9593c4ad, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:01:37,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=156, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4642d1cde05785f48336193c9593c4ad, ASSIGN because future has completed 2024-12-09T11:01:37,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4642d1cde05785f48336193c9593c4ad, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:01:37,849 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad. 2024-12-09T11:01:37,850 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7752): Opening region: {ENCODED => 4642d1cde05785f48336193c9593c4ad, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:01:37,850 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad. service=AccessControlService 2024-12-09T11:01:37,850 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
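
Once the merged region 4642d1cde05785f48336193c9593c4ad is opened in the entries that follow, the table should be served by a single region covering the whole key space. A small, illustrative client-side check (not part of the test run logged here) could look like this:

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class ListRegionsAfterMerge {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      List<RegionInfo> regions =
          admin.getRegions(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
      // After the merge, a single region with an empty start and end key is expected.
      for (RegionInfo region : regions) {
        System.out.println(region.getEncodedName()
            + " [" + Bytes.toStringBinary(region.getStartKey())
            + ", " + Bytes.toStringBinary(region.getEndKey()) + ")");
      }
    }
  }
}
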
2024-12-09T11:01:37,850 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:37,851 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:01:37,851 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7794): checking encryption for 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:37,851 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7797): checking classloading for 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:37,853 INFO [StoreOpener-4642d1cde05785f48336193c9593c4ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:37,854 INFO [StoreOpener-4642d1cde05785f48336193c9593c4ad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4642d1cde05785f48336193c9593c4ad columnFamilyName cf 2024-12-09T11:01:37,854 DEBUG [StoreOpener-4642d1cde05785f48336193c9593c4ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:01:37,867 DEBUG [StoreOpener-4642d1cde05785f48336193c9593c4ad-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/cf/6c03af4923b44125a47519e1efea8f22.0ca641e4aaa755d9220f84ac1f07ec06->hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/cf/6c03af4923b44125a47519e1efea8f22-top 2024-12-09T11:01:37,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=151 2024-12-09T11:01:37,883 DEBUG [StoreOpener-4642d1cde05785f48336193c9593c4ad-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/cf/e7750260d32e4355bb1844f2dc7b4be6.a143a26c1939872db052f56b3aa41d8d->hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/cf/e7750260d32e4355bb1844f2dc7b4be6-top 2024-12-09T11:01:37,884 INFO [StoreOpener-4642d1cde05785f48336193c9593c4ad-1 {}] regionserver.HStore(327): Store=4642d1cde05785f48336193c9593c4ad/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:01:37,884 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1038): replaying wal for 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:37,885 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:37,887 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:37,887 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1048): stopping wal replay for 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:37,887 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1060): Cleaning up temporary data for 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:37,889 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1093): writing seq id for 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:37,890 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1114): Opened 4642d1cde05785f48336193c9593c4ad; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68707877, jitterRate=0.023827150464057922}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:01:37,890 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:37,891 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1006): Region open journal for 4642d1cde05785f48336193c9593c4ad: Running coprocessor pre-open hook at 1733742097851Writing region info on filesystem at 1733742097851Initializing all the Stores at 1733742097852 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742097852Cleaning up temporary data from old regions at 1733742097887 (+35 ms)Running coprocessor post-open hooks at 1733742097890 (+3 ms)Region opened successfully at 1733742097891 (+1 ms) 2024-12-09T11:01:37,892 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad., pid=157, masterSystemTime=1733742097841 2024-12-09T11:01:37,892 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad.,because compaction is disabled. 2024-12-09T11:01:37,898 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad. 2024-12-09T11:01:37,898 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad. 2024-12-09T11:01:37,902 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=156 updating hbase:meta row=4642d1cde05785f48336193c9593c4ad, regionState=OPEN, openSeqNum=9, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:01:37,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=157, ppid=156, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4642d1cde05785f48336193c9593c4ad, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:01:37,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=157, resume processing ppid=156 2024-12-09T11:01:37,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, ppid=156, state=SUCCESS, hasLock=false; OpenRegionProcedure 4642d1cde05785f48336193c9593c4ad, server=3469f9ca0af3,39691,1733741766880 in 226 msec 2024-12-09T11:01:37,920 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=151 2024-12-09T11:01:37,920 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4642d1cde05785f48336193c9593c4ad, ASSIGN in 405 msec 2024-12-09T11:01:37,922 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[0ca641e4aaa755d9220f84ac1f07ec06, a143a26c1939872db052f56b3aa41d8d], force=true in 1.2000 sec 2024-12-09T11:01:38,496 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:01:38,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=151 2024-12-09T11:01:38,893 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-09T11:01:38,893 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-09T11:01:38,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742098893 (current time:1733742098893). 2024-12-09T11:01:38,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:01:38,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-09T11:01:38,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:01:38,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@321510c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:38,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:01:38,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:01:38,895 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:01:38,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:01:38,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:01:38,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e08b56c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:38,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:01:38,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:01:38,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:38,898 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42446, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:01:38,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ce3fbb1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:38,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:01:38,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:01:38,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:01:38,904 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50106, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:01:38,906 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 2024-12-09T11:01:38,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:01:38,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:38,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:38,906 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
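
The snapshot request being validated in these entries ({ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }) corresponds to a client-side Admin.snapshot call. A minimal sketch, assuming the cluster configuration is on the classpath; for an enabled table this is taken as a flush-type snapshot, matching the type shown in the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Takes a snapshot of the table; the call returns once the master-side
      // SnapshotProcedure has completed.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
    }
  }
}
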
2024-12-09T11:01:38,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18a2f13c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:38,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:01:38,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:01:38,908 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:01:38,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:01:38,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:01:38,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b8e5b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:38,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:01:38,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:01:38,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:38,912 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42474, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:01:38,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e87df8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:01:38,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:01:38,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:01:38,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:01:38,917 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50112, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T11:01:38,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:01:38,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:01:38,928 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53264, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:01:38,930 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 2024-12-09T11:01:38,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:01:38,930 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:38,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:01:38,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-09T11:01:38,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T11:01:38,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=158, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=158, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-09T11:01:38,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 158 2024-12-09T11:01:38,933 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:01:38,934 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=158, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=158, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:01:38,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=158 2024-12-09T11:01:38,935 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=158, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=158, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:01:38,938 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=158, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=158, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:01:38,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742207_1383 (size=216) 2024-12-09T11:01:38,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742207_1383 (size=216) 2024-12-09T11:01:38,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742207_1383 (size=216) 2024-12-09T11:01:38,955 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=158, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=158, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:01:38,956 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4642d1cde05785f48336193c9593c4ad}] 2024-12-09T11:01:38,958 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:39,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=158 2024-12-09T11:01:39,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=159 2024-12-09T11:01:39,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=159}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad. 2024-12-09T11:01:39,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=159}] regionserver.HRegion(2603): Flush status journal for 4642d1cde05785f48336193c9593c4ad: 2024-12-09T11:01:39,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=159}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-09T11:01:39,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=159}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:01:39,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=159}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:01:39,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=159}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/cf/6c03af4923b44125a47519e1efea8f22.0ca641e4aaa755d9220f84ac1f07ec06->hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/cf/6c03af4923b44125a47519e1efea8f22-top, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/cf/e7750260d32e4355bb1844f2dc7b4be6.a143a26c1939872db052f56b3aa41d8d->hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/cf/e7750260d32e4355bb1844f2dc7b4be6-top] hfiles 2024-12-09T11:01:39,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=159}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/cf/6c03af4923b44125a47519e1efea8f22.0ca641e4aaa755d9220f84ac1f07ec06 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:01:39,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=159}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/cf/e7750260d32e4355bb1844f2dc7b4be6.a143a26c1939872db052f56b3aa41d8d for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:01:39,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742208_1384 (size=269) 2024-12-09T11:01:39,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742208_1384 (size=269) 2024-12-09T11:01:39,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742208_1384 (size=269) 2024-12-09T11:01:39,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=159}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad. 
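
The entries further below show this snapshot being exported to a second HDFS location through the ExportSnapshot MapReduce tool. A hedged sketch of an equivalent programmatic invocation, assuming the tool can be driven through Hadoop's ToolRunner like other Hadoop tools; the -snapshot and -copy-to arguments and the destination path are as they appear in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy the completed snapshot to the export destination used by the test;
    // the destination URI is taken from the log and is only illustrative here.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742099253"
    });
    System.exit(rc);
  }
}

The same export is more commonly run from the command line (hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>); the programmatic form above is just one way to wire it into test or tooling code.
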
2024-12-09T11:01:39,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=159 2024-12-09T11:01:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=159 2024-12-09T11:01:39,167 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:39,167 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:01:39,171 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-09T11:01:39,171 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4642d1cde05785f48336193c9593c4ad in 212 msec 2024-12-09T11:01:39,171 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=158, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=158, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:01:39,172 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=158, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=158, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:01:39,173 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=158, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=158, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:01:39,173 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:01:39,174 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:01:39,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742209_1385 (size=670) 2024-12-09T11:01:39,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742209_1385 (size=670) 2024-12-09T11:01:39,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742209_1385 (size=670) 2024-12-09T11:01:39,205 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=158, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=158, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:01:39,218 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=158, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=158, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:01:39,218 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:01:39,220 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=158, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=158, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:01:39,220 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 158 2024-12-09T11:01:39,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=158, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 289 msec 2024-12-09T11:01:39,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=158 2024-12-09T11:01:39,253 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-09T11:01:39,253 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742099253 2024-12-09T11:01:39,253 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35869, tgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742099253, rawTgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742099253, srcFsUri=hdfs://localhost:35869, srcDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:01:39,299 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35869, inputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:01:39,299 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742099253, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742099253/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:01:39,302 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T11:01:39,308 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742099253/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:01:39,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742210_1386 (size=216) 2024-12-09T11:01:39,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742210_1386 (size=216) 2024-12-09T11:01:39,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742210_1386 (size=216) 2024-12-09T11:01:39,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742211_1387 (size=670) 2024-12-09T11:01:39,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742211_1387 (size=670) 2024-12-09T11:01:39,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742211_1387 (size=670) 2024-12-09T11:01:39,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:39,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:39,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:41,231 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-1246566032856261069.jar 2024-12-09T11:01:41,231 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:41,232 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:41,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-10921292725330092439.jar 2024-12-09T11:01:41,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:41,356 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:41,356 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:41,356 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:41,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:41,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:01:41,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T11:01:41,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T11:01:41,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T11:01:41,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T11:01:41,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T11:01:41,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T11:01:41,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T11:01:41,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T11:01:41,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T11:01:41,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T11:01:41,361 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T11:01:41,361 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:01:41,361 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:01:41,362 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:01:41,362 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:01:41,362 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:01:41,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:01:41,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:01:41,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742212_1388 (size=24020) 2024-12-09T11:01:41,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742212_1388 (size=24020) 2024-12-09T11:01:41,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742212_1388 (size=24020) 2024-12-09T11:01:41,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742213_1389 (size=77755) 2024-12-09T11:01:41,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742213_1389 (size=77755) 2024-12-09T11:01:41,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742213_1389 (size=77755) 2024-12-09T11:01:41,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742214_1390 (size=131360) 2024-12-09T11:01:41,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742214_1390 (size=131360) 2024-12-09T11:01:41,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742214_1390 (size=131360) 2024-12-09T11:01:41,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742215_1391 (size=111793) 2024-12-09T11:01:41,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742215_1391 (size=111793) 2024-12-09T11:01:41,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742215_1391 (size=111793) 2024-12-09T11:01:41,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742216_1392 (size=6425021) 2024-12-09T11:01:41,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742216_1392 (size=6425021) 2024-12-09T11:01:41,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742216_1392 (size=6425021) 2024-12-09T11:01:41,893 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742217_1393 (size=1832290) 2024-12-09T11:01:41,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742217_1393 (size=1832290) 2024-12-09T11:01:41,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742217_1393 (size=1832290) 2024-12-09T11:01:42,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742218_1394 (size=8360282) 2024-12-09T11:01:42,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742218_1394 (size=8360282) 2024-12-09T11:01:42,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742218_1394 (size=8360282) 2024-12-09T11:01:42,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742219_1395 (size=503880) 2024-12-09T11:01:42,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742219_1395 (size=503880) 2024-12-09T11:01:42,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742219_1395 (size=503880) 2024-12-09T11:01:42,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742220_1396 (size=322274) 2024-12-09T11:01:42,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742220_1396 (size=322274) 2024-12-09T11:01:42,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742220_1396 (size=322274) 2024-12-09T11:01:42,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742221_1397 (size=20406) 2024-12-09T11:01:42,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742221_1397 (size=20406) 2024-12-09T11:01:42,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742221_1397 (size=20406) 2024-12-09T11:01:42,214 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000001/launch_container.sh] 2024-12-09T11:01:42,215 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000001/container_tokens] 2024-12-09T11:01:42,215 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0006/container_1733741775522_0006_01_000001/sysfs] 2024-12-09T11:01:42,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742222_1398 (size=45609) 2024-12-09T11:01:42,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742222_1398 (size=45609) 2024-12-09T11:01:42,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742222_1398 (size=45609) 2024-12-09T11:01:42,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742223_1399 (size=136454) 2024-12-09T11:01:42,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742223_1399 (size=136454) 2024-12-09T11:01:42,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742223_1399 (size=136454) 2024-12-09T11:01:42,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742224_1400 (size=1597136) 2024-12-09T11:01:42,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742224_1400 (size=1597136) 2024-12-09T11:01:42,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742224_1400 (size=1597136) 2024-12-09T11:01:42,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742225_1401 (size=30873) 2024-12-09T11:01:42,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742225_1401 (size=30873) 2024-12-09T11:01:42,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742225_1401 (size=30873) 2024-12-09T11:01:42,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742226_1402 (size=29229) 2024-12-09T11:01:42,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742226_1402 (size=29229) 2024-12-09T11:01:42,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742226_1402 (size=29229) 2024-12-09T11:01:42,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742227_1403 (size=903861) 2024-12-09T11:01:42,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742227_1403 (size=903861) 2024-12-09T11:01:42,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742227_1403 (size=903861) 
2024-12-09T11:01:42,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742228_1404 (size=5175431) 2024-12-09T11:01:42,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742228_1404 (size=5175431) 2024-12-09T11:01:42,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742228_1404 (size=5175431) 2024-12-09T11:01:42,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742229_1405 (size=232881) 2024-12-09T11:01:42,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742229_1405 (size=232881) 2024-12-09T11:01:43,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742229_1405 (size=232881) 2024-12-09T11:01:43,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742230_1406 (size=1323991) 2024-12-09T11:01:43,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742230_1406 (size=1323991) 2024-12-09T11:01:43,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742230_1406 (size=1323991) 2024-12-09T11:01:43,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742231_1407 (size=4695811) 2024-12-09T11:01:43,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742231_1407 (size=4695811) 2024-12-09T11:01:43,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742231_1407 (size=4695811) 2024-12-09T11:01:43,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742232_1408 (size=1877034) 2024-12-09T11:01:43,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742232_1408 (size=1877034) 2024-12-09T11:01:43,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742232_1408 (size=1877034) 2024-12-09T11:01:43,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742233_1409 (size=443171) 2024-12-09T11:01:43,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742233_1409 (size=443171) 2024-12-09T11:01:43,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742233_1409 (size=443171) 2024-12-09T11:01:43,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742234_1410 (size=217555) 2024-12-09T11:01:43,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to 
blk_1073742234_1410 (size=217555) 2024-12-09T11:01:43,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742234_1410 (size=217555) 2024-12-09T11:01:43,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742235_1411 (size=4188619) 2024-12-09T11:01:43,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742235_1411 (size=4188619) 2024-12-09T11:01:43,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742235_1411 (size=4188619) 2024-12-09T11:01:43,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742236_1412 (size=127628) 2024-12-09T11:01:43,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742236_1412 (size=127628) 2024-12-09T11:01:43,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742236_1412 (size=127628) 2024-12-09T11:01:44,136 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-09T11:01:44,142 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-09T11:01:44,145 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-09T11:01:44,146 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-09T11:01:44,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742237_1413 (size=481) 2024-12-09T11:01:44,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742237_1413 (size=481) 2024-12-09T11:01:44,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742237_1413 (size=481) 2024-12-09T11:01:44,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742238_1414 (size=21) 2024-12-09T11:01:44,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742238_1414 (size=21) 2024-12-09T11:01:44,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742238_1414 (size=21) 2024-12-09T11:01:44,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742239_1415 (size=304136) 2024-12-09T11:01:44,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742239_1415 (size=304136) 2024-12-09T11:01:44,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742239_1415 (size=304136) 2024-12-09T11:01:44,832 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is 
insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T11:01:44,832 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T11:01:45,078 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0007_000001 (auth:SIMPLE) from 127.0.0.1:50692 2024-12-09T11:01:50,682 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0007_000001 (auth:SIMPLE) from 127.0.0.1:39800 2024-12-09T11:01:50,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742240_1416 (size=349834) 2024-12-09T11:01:50,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742240_1416 (size=349834) 2024-12-09T11:01:50,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742240_1416 (size=349834) 2024-12-09T11:01:52,994 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0007_000001 (auth:SIMPLE) from 127.0.0.1:48850 2024-12-09T11:01:52,994 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0007_000001 (auth:SIMPLE) from 127.0.0.1:60164 2024-12-09T11:01:57,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742241_1417 (size=4945) 2024-12-09T11:01:57,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742241_1417 (size=4945) 2024-12-09T11:01:57,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742241_1417 (size=4945) 2024-12-09T11:01:57,432 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0007/container_1733741775522_0007_01_000002/launch_container.sh] 2024-12-09T11:01:57,432 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0007/container_1733741775522_0007_01_000002/container_tokens] 2024-12-09T11:01:57,433 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0007/container_1733741775522_0007_01_000002/sysfs] 2024-12-09T11:01:57,470 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 33657 2024-12-09T11:01:58,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742243_1419 (size=4945) 2024-12-09T11:01:58,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742243_1419 (size=4945) 2024-12-09T11:01:58,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742243_1419 (size=4945) 2024-12-09T11:01:58,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742242_1418 (size=22246) 2024-12-09T11:01:58,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742242_1418 (size=22246) 2024-12-09T11:01:58,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742242_1418 (size=22246) 2024-12-09T11:01:58,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742244_1420 (size=482) 2024-12-09T11:01:58,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742244_1420 (size=482) 2024-12-09T11:01:58,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742244_1420 (size=482) 2024-12-09T11:01:58,660 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0007/container_1733741775522_0007_01_000003/launch_container.sh] 2024-12-09T11:01:58,660 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0007/container_1733741775522_0007_01_000003/container_tokens] 2024-12-09T11:01:58,660 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0007/container_1733741775522_0007_01_000003/sysfs] 2024-12-09T11:01:58,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742245_1421 (size=22246) 2024-12-09T11:01:58,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742245_1421 (size=22246) 2024-12-09T11:01:58,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742245_1421 (size=22246) 2024-12-09T11:01:58,728 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742246_1422 (size=349834) 2024-12-09T11:01:58,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742246_1422 (size=349834) 2024-12-09T11:01:58,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742246_1422 (size=349834) 2024-12-09T11:01:58,749 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0007_000001 (auth:SIMPLE) from 127.0.0.1:40612 2024-12-09T11:02:00,070 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T11:02:00,071 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-09T11:02:00,079 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,079 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T11:02:00,080 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T11:02:00,080 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,080 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-09T11:02:00,080 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-09T11:02:00,080 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742099253/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742099253/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,081 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742099253/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-09T11:02:00,081 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742099253/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-09T11:02:00,089 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] 
master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=160, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=160 2024-12-09T11:02:00,093 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742120093"}]},"ts":"1733742120093"} 2024-12-09T11:02:00,095 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-09T11:02:00,095 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-09T11:02:00,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-09T11:02:00,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4642d1cde05785f48336193c9593c4ad, UNASSIGN}] 2024-12-09T11:02:00,099 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4642d1cde05785f48336193c9593c4ad, UNASSIGN 2024-12-09T11:02:00,099 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=162 updating hbase:meta row=4642d1cde05785f48336193c9593c4ad, regionState=CLOSING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:02:00,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4642d1cde05785f48336193c9593c4ad, UNASSIGN because future has completed 2024-12-09T11:02:00,101 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:02:00,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4642d1cde05785f48336193c9593c4ad, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:02:00,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=160 2024-12-09T11:02:00,254 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(122): Close 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:02:00,255 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 
{event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:02:00,255 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1722): Closing 4642d1cde05785f48336193c9593c4ad, disabling compactions & flushes 2024-12-09T11:02:00,255 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad. 2024-12-09T11:02:00,255 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad. 2024-12-09T11:02:00,255 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad. after waiting 0 ms 2024-12-09T11:02:00,255 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad. 2024-12-09T11:02:00,278 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-09T11:02:00,278 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:02:00,278 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad. 
2024-12-09T11:02:00,279 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1676): Region close journal for 4642d1cde05785f48336193c9593c4ad: Waiting for close lock at 1733742120255Running coprocessor pre-close hooks at 1733742120255Disabling compacts and flushes for region at 1733742120255Disabling writes for close at 1733742120255Writing region close event to WAL at 1733742120256 (+1 ms)Running coprocessor post-close hooks at 1733742120278 (+22 ms)Closed at 1733742120278 2024-12-09T11:02:00,281 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(157): Closed 4642d1cde05785f48336193c9593c4ad 2024-12-09T11:02:00,284 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=162 updating hbase:meta row=4642d1cde05785f48336193c9593c4ad, regionState=CLOSED 2024-12-09T11:02:00,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=163, ppid=162, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4642d1cde05785f48336193c9593c4ad, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:02:00,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-09T11:02:00,291 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseRegionProcedure 4642d1cde05785f48336193c9593c4ad, server=3469f9ca0af3,39691,1733741766880 in 186 msec 2024-12-09T11:02:00,293 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=162, resume processing ppid=161 2024-12-09T11:02:00,293 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, ppid=161, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4642d1cde05785f48336193c9593c4ad, UNASSIGN in 192 msec 2024-12-09T11:02:00,297 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=161, resume processing ppid=160 2024-12-09T11:02:00,297 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, ppid=160, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 198 msec 2024-12-09T11:02:00,298 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742120298"}]},"ts":"1733742120298"} 2024-12-09T11:02:00,301 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-09T11:02:00,301 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-09T11:02:00,303 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 212 msec 2024-12-09T11:02:00,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=160 2024-12-09T11:02:00,413 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-09T11:02:00,413 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=164, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,416 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=164, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,417 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=164, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,420 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,422 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad 2024-12-09T11:02:00,422 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:02:00,422 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:02:00,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,430 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-09T11:02:00,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-09T11:02:00,431 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/recovered.edits] 2024-12-09T11:02:00,431 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/recovered.edits] 2024-12-09T11:02:00,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:00,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:00,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data null 2024-12-09T11:02:00,432 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T11:02:00,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:00,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:00,434 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data null 2024-12-09T11:02:00,434 INFO 
[zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T11:02:00,434 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/recovered.edits] 2024-12-09T11:02:00,435 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:00,435 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:00,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=164 2024-12-09T11:02:00,438 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/cf/e7750260d32e4355bb1844f2dc7b4be6 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/cf/e7750260d32e4355bb1844f2dc7b4be6 2024-12-09T11:02:00,438 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/cf/6c03af4923b44125a47519e1efea8f22.0ca641e4aaa755d9220f84ac1f07ec06 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/cf/6c03af4923b44125a47519e1efea8f22.0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:02:00,440 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/cf/e7750260d32e4355bb1844f2dc7b4be6.a143a26c1939872db052f56b3aa41d8d to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/cf/e7750260d32e4355bb1844f2dc7b4be6.a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:02:00,442 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 
2024-12-09T11:02:00,443 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:00,443 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/cf/6c03af4923b44125a47519e1efea8f22 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/cf/6c03af4923b44125a47519e1efea8f22 2024-12-09T11:02:00,443 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/recovered.edits/8.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d/recovered.edits/8.seqid 2024-12-09T11:02:00,444 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a143a26c1939872db052f56b3aa41d8d 2024-12-09T11:02:00,445 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/recovered.edits/12.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad/recovered.edits/12.seqid 2024-12-09T11:02:00,445 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4642d1cde05785f48336193c9593c4ad 2024-12-09T11:02:00,447 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/recovered.edits/8.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06/recovered.edits/8.seqid 2024-12-09T11:02:00,448 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/0ca641e4aaa755d9220f84ac1f07ec06 2024-12-09T11:02:00,448 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-09T11:02:00,450 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=164, 
state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,454 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-09T11:02:00,456 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-09T11:02:00,457 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=164, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,457 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-09T11:02:00,457 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742120457"}]},"ts":"9223372036854775807"} 2024-12-09T11:02:00,461 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-09T11:02:00,461 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 4642d1cde05785f48336193c9593c4ad, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad.', STARTKEY => '', ENDKEY => ''}] 2024-12-09T11:02:00,461 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-09T11:02:00,461 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733742120461"}]},"ts":"9223372036854775807"} 2024-12-09T11:02:00,463 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-09T11:02:00,465 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=164, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,467 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 52 msec 2024-12-09T11:02:00,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=164 2024-12-09T11:02:00,542 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:00,542 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-09T11:02:00,543 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored 
pid=165, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-12-09T11:02:00,546 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742120546"}]},"ts":"1733742120546"} 2024-12-09T11:02:00,548 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-09T11:02:00,548 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-09T11:02:00,550 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-09T11:02:00,551 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0413b0a34c22bfd368e5306d4a33b41e, UNASSIGN}, {pid=168, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bd3e01337768bb7e558b2def558c3c3a, UNASSIGN}] 2024-12-09T11:02:00,552 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=168, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bd3e01337768bb7e558b2def558c3c3a, UNASSIGN 2024-12-09T11:02:00,553 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0413b0a34c22bfd368e5306d4a33b41e, UNASSIGN 2024-12-09T11:02:00,554 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=168 updating hbase:meta row=bd3e01337768bb7e558b2def558c3c3a, regionState=CLOSING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:02:00,554 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=167 updating hbase:meta row=0413b0a34c22bfd368e5306d4a33b41e, regionState=CLOSING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:02:00,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=168, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bd3e01337768bb7e558b2def558c3c3a, UNASSIGN because future has completed 2024-12-09T11:02:00,556 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:02:00,556 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE, hasLock=false; CloseRegionProcedure bd3e01337768bb7e558b2def558c3c3a, 
server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T11:02:00,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0413b0a34c22bfd368e5306d4a33b41e, UNASSIGN because future has completed 2024-12-09T11:02:00,558 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:02:00,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=167, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:02:00,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-12-09T11:02:00,709 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(122): Close bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:02:00,709 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:02:00,709 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1722): Closing bd3e01337768bb7e558b2def558c3c3a, disabling compactions & flushes 2024-12-09T11:02:00,709 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 2024-12-09T11:02:00,709 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 2024-12-09T11:02:00,709 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. after waiting 0 ms 2024-12-09T11:02:00,709 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 
2024-12-09T11:02:00,711 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(122): Close 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:02:00,711 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:02:00,711 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1722): Closing 0413b0a34c22bfd368e5306d4a33b41e, disabling compactions & flushes 2024-12-09T11:02:00,711 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 2024-12-09T11:02:00,711 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 2024-12-09T11:02:00,711 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. after waiting 0 ms 2024-12-09T11:02:00,711 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 2024-12-09T11:02:00,715 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:02:00,715 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:02:00,716 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:02:00,716 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a. 
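The DisableTableProcedure / CloseTableRegionsProcedure / CloseRegionProcedure chain recorded around these entries, followed by the DeleteTableProcedure further down, is the master-side work triggered by a single disable-then-delete call from the test client. A minimal client-side sketch of that call using the standard HBase Admin API follows; the configuration source and error handling are assumptions for illustration, not taken from this log or the test source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            // Picks up hbase-site.xml from the classpath; an assumption, not from the log.
            Configuration conf = HBaseConfiguration.create();
            TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                if (admin.isTableEnabled(table)) {
                    // Master runs DisableTableProcedure and closes each region (UNASSIGN), as logged above.
                    admin.disableTable(table);
                }
                // Master runs DeleteTableProcedure: archive HFiles, remove rows from hbase:meta.
                admin.deleteTable(table);
            }
        }
    }

Roughly speaking, disableTable returns once all regions have transitioned to CLOSED, and deleteTable then produces the HFileArchiver and RegionStateStore activity seen in the surrounding entries.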
2024-12-09T11:02:00,716 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1676): Region close journal for bd3e01337768bb7e558b2def558c3c3a: Waiting for close lock at 1733742120709Running coprocessor pre-close hooks at 1733742120709Disabling compacts and flushes for region at 1733742120709Disabling writes for close at 1733742120709Writing region close event to WAL at 1733742120710 (+1 ms)Running coprocessor post-close hooks at 1733742120716 (+6 ms)Closed at 1733742120716 2024-12-09T11:02:00,716 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:02:00,716 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e. 2024-12-09T11:02:00,716 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1676): Region close journal for 0413b0a34c22bfd368e5306d4a33b41e: Waiting for close lock at 1733742120711Running coprocessor pre-close hooks at 1733742120711Disabling compacts and flushes for region at 1733742120711Disabling writes for close at 1733742120711Writing region close event to WAL at 1733742120712 (+1 ms)Running coprocessor post-close hooks at 1733742120716 (+4 ms)Closed at 1733742120716 2024-12-09T11:02:00,718 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(157): Closed bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:02:00,718 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=168 updating hbase:meta row=bd3e01337768bb7e558b2def558c3c3a, regionState=CLOSED 2024-12-09T11:02:00,719 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(157): Closed 0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:02:00,720 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=167 updating hbase:meta row=0413b0a34c22bfd368e5306d4a33b41e, regionState=CLOSED 2024-12-09T11:02:00,720 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=169, ppid=168, state=RUNNABLE, hasLock=false; CloseRegionProcedure bd3e01337768bb7e558b2def558c3c3a, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T11:02:00,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=167, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:02:00,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=169, resume processing ppid=168 2024-12-09T11:02:00,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, ppid=168, state=SUCCESS, hasLock=false; CloseRegionProcedure bd3e01337768bb7e558b2def558c3c3a, server=3469f9ca0af3,33293,1733741767044 in 167 msec 2024-12-09T11:02:00,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=167 2024-12-09T11:02:00,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=167, 
state=SUCCESS, hasLock=false; CloseRegionProcedure 0413b0a34c22bfd368e5306d4a33b41e, server=3469f9ca0af3,39691,1733741766880 in 164 msec 2024-12-09T11:02:00,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, ppid=166, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=bd3e01337768bb7e558b2def558c3c3a, UNASSIGN in 173 msec 2024-12-09T11:02:00,734 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=166 2024-12-09T11:02:00,734 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=166, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0413b0a34c22bfd368e5306d4a33b41e, UNASSIGN in 174 msec 2024-12-09T11:02:00,740 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-12-09T11:02:00,740 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 186 msec 2024-12-09T11:02:00,742 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742120742"}]},"ts":"1733742120742"} 2024-12-09T11:02:00,744 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-09T11:02:00,744 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-09T11:02:00,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 202 msec 2024-12-09T11:02:00,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-12-09T11:02:00,863 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-09T11:02:00,863 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,865 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,866 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=171, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,868 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,871 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:02:00,871 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:02:00,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,875 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-09T11:02:00,875 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/recovered.edits] 2024-12-09T11:02:00,876 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/recovered.edits] 2024-12-09T11:02:00,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,878 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-09T11:02:00,881 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-09T11:02:00,882 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/cf/7ea5edac991c4103836a4ef8cd8c39a4 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/cf/7ea5edac991c4103836a4ef8cd8c39a4 2024-12-09T11:02:00,882 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/cf/9ac2d5cc9a324e34b4d48d84bdadcab6 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/cf/9ac2d5cc9a324e34b4d48d84bdadcab6 2024-12-09T11:02:00,885 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a/recovered.edits/9.seqid 2024-12-09T11:02:00,885 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:02:00,885 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e/recovered.edits/9.seqid 2024-12-09T11:02:00,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:00,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/acl 2024-12-09T11:02:00,887 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithMergeRegion/0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:02:00,887 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-09T11:02:00,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=171 2024-12-09T11:02:00,887 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-09T11:02:00,888 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-12-09T11:02:00,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:00,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:00,893 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data null 2024-12-09T11:02:00,893 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T11:02:00,893 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241209ed09fd8db6dd4d68bb0f5e3175feeb48_bd3e01337768bb7e558b2def558c3c3a to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241209ed09fd8db6dd4d68bb0f5e3175feeb48_bd3e01337768bb7e558b2def558c3c3a 2024-12-09T11:02:00,894 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241209fe0c8b23c89346088189699fbb92d41b_0413b0a34c22bfd368e5306d4a33b41e to 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241209fe0c8b23c89346088189699fbb92d41b_0413b0a34c22bfd368e5306d4a33b41e 2024-12-09T11:02:00,896 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-09T11:02:00,900 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=171, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,904 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-09T11:02:00,906 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-09T11:02:00,910 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=171, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,910 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-09T11:02:00,910 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742120910"}]},"ts":"9223372036854775807"} 2024-12-09T11:02:00,910 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742120910"}]},"ts":"9223372036854775807"} 2024-12-09T11:02:00,913 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T11:02:00,913 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0413b0a34c22bfd368e5306d4a33b41e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733742093295.0413b0a34c22bfd368e5306d4a33b41e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => bd3e01337768bb7e558b2def558c3c3a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733742093295.bd3e01337768bb7e558b2def558c3c3a.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T11:02:00,913 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
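The snapshot delete requests that follow shortly after this point ("emptySnaptb0-...", "snaptb0-...", "snaptb0-...-1") are likewise plain Admin calls. A short hedged sketch, with connection handling assumed rather than taken from the test source:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropSnapshotsSketch {
        public static void main(String[] args) throws IOException {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Each call appears in the log as a MasterRpcServices delete request
                // followed by SnapshotManager removing the snapshot directory.
                admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
                admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
                admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
            }
        }
    }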
2024-12-09T11:02:00,913 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733742120913"}]},"ts":"9223372036854775807"} 2024-12-09T11:02:00,915 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-09T11:02:00,916 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=171, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,918 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 53 msec 2024-12-09T11:02:00,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=171 2024-12-09T11:02:00,993 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:00,993 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-09T11:02:01,003 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-09T11:02:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:01,006 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-09T11:02:01,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:01,009 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-09T11:02:01,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:01,037 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=819 (was 802) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:49000 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1359318497) connection to localhost/127.0.0.1:34915 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-237783572_1 at /127.0.0.1:48972 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6487 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:51158 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-237783572_1 at /127.0.0.1:51134 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:55220 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 20462) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34915 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=829 (was 811) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1118 (was 1269), ProcessCount=14 (was 17), AvailableMemoryMB=3077 (was 2579) - AvailableMemoryMB LEAK? 
- 2024-12-09T11:02:01,037 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500 2024-12-09T11:02:01,060 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=819, OpenFileDescriptor=829, MaxFileDescriptor=1048576, SystemLoadAverage=1118, ProcessCount=14, AvailableMemoryMB=3077 2024-12-09T11:02:01,060 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500 2024-12-09T11:02:01,062 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:02:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=172, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T11:02:01,064 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=172, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:02:01,065 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 172 2024-12-09T11:02:01,065 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=172, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:02:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=172 2024-12-09T11:02:01,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742247_1423 (size=443) 2024-12-09T11:02:01,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742247_1423 (size=443) 2024-12-09T11:02:01,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742247_1423 (size=443) 2024-12-09T11:02:01,090 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7998226f5ba8f3ecc1397ce969d3d6c5, NAME => 'testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:01,091 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ba25c9349712e5280183c212484d14c4, NAME => 'testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:01,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742249_1425 (size=68) 2024-12-09T11:02:01,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742248_1424 (size=68) 2024-12-09T11:02:01,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742249_1425 (size=68) 2024-12-09T11:02:01,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742249_1425 (size=68) 2024-12-09T11:02:01,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742248_1424 (size=68) 2024-12-09T11:02:01,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742248_1424 (size=68) 2024-12-09T11:02:01,122 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:01,122 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 7998226f5ba8f3ecc1397ce969d3d6c5, disabling compactions & flushes 2024-12-09T11:02:01,122 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:01,122 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:01,122 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 
after waiting 0 ms 2024-12-09T11:02:01,122 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:01,122 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:01,122 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7998226f5ba8f3ecc1397ce969d3d6c5: Waiting for close lock at 1733742121122Disabling compacts and flushes for region at 1733742121122Disabling writes for close at 1733742121122Writing region close event to WAL at 1733742121122Closed at 1733742121122 2024-12-09T11:02:01,123 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:01,123 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing ba25c9349712e5280183c212484d14c4, disabling compactions & flushes 2024-12-09T11:02:01,123 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 2024-12-09T11:02:01,123 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 2024-12-09T11:02:01,123 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. after waiting 0 ms 2024-12-09T11:02:01,123 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 2024-12-09T11:02:01,123 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 
2024-12-09T11:02:01,123 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for ba25c9349712e5280183c212484d14c4: Waiting for close lock at 1733742121123Disabling compacts and flushes for region at 1733742121123Disabling writes for close at 1733742121123Writing region close event to WAL at 1733742121123Closed at 1733742121123 2024-12-09T11:02:01,126 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=172, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:02:01,126 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733742121126"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742121126"}]},"ts":"1733742121126"} 2024-12-09T11:02:01,126 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733742121126"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742121126"}]},"ts":"1733742121126"} 2024-12-09T11:02:01,132 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T11:02:01,133 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=172, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:02:01,134 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742121134"}]},"ts":"1733742121134"} 2024-12-09T11:02:01,138 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-09T11:02:01,139 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T11:02:01,140 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:02:01,140 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T11:02:01,140 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T11:02:01,140 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:02:01,140 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:02:01,140 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T11:02:01,140 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T11:02:01,140 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:02:01,140 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T11:02:01,140 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T11:02:01,140 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=7998226f5ba8f3ecc1397ce969d3d6c5, ASSIGN}, {pid=174, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ba25c9349712e5280183c212484d14c4, ASSIGN}] 2024-12-09T11:02:01,143 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=174, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ba25c9349712e5280183c212484d14c4, ASSIGN 2024-12-09T11:02:01,143 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=173, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=7998226f5ba8f3ecc1397ce969d3d6c5, ASSIGN 2024-12-09T11:02:01,144 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=174, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ba25c9349712e5280183c212484d14c4, ASSIGN; state=OFFLINE, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T11:02:01,145 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=173, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=7998226f5ba8f3ecc1397ce969d3d6c5, ASSIGN; state=OFFLINE, location=3469f9ca0af3,39691,1733741766880; forceNewPlan=false, retain=false 2024-12-09T11:02:01,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=172 2024-12-09T11:02:01,295 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-09T11:02:01,295 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=174 updating hbase:meta row=ba25c9349712e5280183c212484d14c4, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:02:01,295 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=173 updating hbase:meta row=7998226f5ba8f3ecc1397ce969d3d6c5, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:02:01,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=174, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ba25c9349712e5280183c212484d14c4, ASSIGN because future has completed 2024-12-09T11:02:01,298 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; OpenRegionProcedure ba25c9349712e5280183c212484d14c4, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T11:02:01,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=7998226f5ba8f3ecc1397ce969d3d6c5, ASSIGN because future has completed 2024-12-09T11:02:01,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=176, ppid=173, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:02:01,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=172 2024-12-09T11:02:01,454 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:01,454 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 2024-12-09T11:02:01,454 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(7752): Opening region: {ENCODED => 7998226f5ba8f3ecc1397ce969d3d6c5, NAME => 'testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T11:02:01,454 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(7752): Opening region: {ENCODED => ba25c9349712e5280183c212484d14c4, NAME => 'testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T11:02:01,455 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. service=AccessControlService 2024-12-09T11:02:01,455 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 
service=AccessControlService 2024-12-09T11:02:01,455 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T11:02:01,455 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T11:02:01,455 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,455 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,455 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:01,455 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:01,455 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(7794): checking encryption for ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,455 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(7797): checking classloading for ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,455 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(7794): checking encryption for 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,455 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(7797): checking classloading for 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,456 INFO [StoreOpener-ba25c9349712e5280183c212484d14c4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,457 INFO [StoreOpener-7998226f5ba8f3ecc1397ce969d3d6c5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,458 INFO [StoreOpener-ba25c9349712e5280183c212484d14c4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); 
files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ba25c9349712e5280183c212484d14c4 columnFamilyName cf 2024-12-09T11:02:01,458 INFO [StoreOpener-7998226f5ba8f3ecc1397ce969d3d6c5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7998226f5ba8f3ecc1397ce969d3d6c5 columnFamilyName cf 2024-12-09T11:02:01,459 DEBUG [StoreOpener-ba25c9349712e5280183c212484d14c4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:01,459 DEBUG [StoreOpener-7998226f5ba8f3ecc1397ce969d3d6c5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:01,459 INFO [StoreOpener-7998226f5ba8f3ecc1397ce969d3d6c5-1 {}] regionserver.HStore(327): Store=7998226f5ba8f3ecc1397ce969d3d6c5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:02:01,459 INFO [StoreOpener-ba25c9349712e5280183c212484d14c4-1 {}] regionserver.HStore(327): Store=ba25c9349712e5280183c212484d14c4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:02:01,460 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1038): replaying wal for ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,460 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1038): replaying wal for 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,460 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,460 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,461 DEBUG 
[RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,461 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,461 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1048): stopping wal replay for ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,461 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1060): Cleaning up temporary data for ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,461 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1048): stopping wal replay for 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,461 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1060): Cleaning up temporary data for 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,463 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1093): writing seq id for 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,463 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1093): writing seq id for ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,465 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:02:01,465 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1114): Opened 7998226f5ba8f3ecc1397ce969d3d6c5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72397778, jitterRate=0.07881096005439758}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:02:01,465 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,465 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:02:01,466 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1114): Opened ba25c9349712e5280183c212484d14c4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62794640, jitterRate=-0.06428694725036621}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:02:01,466 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,466 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1006): Region open journal for 7998226f5ba8f3ecc1397ce969d3d6c5: Running coprocessor pre-open hook at 1733742121455Writing region info on filesystem at 1733742121455Initializing all the Stores at 1733742121456 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742121456Cleaning up temporary data from old regions at 1733742121461 (+5 ms)Running coprocessor post-open hooks at 1733742121465 (+4 ms)Region opened successfully at 1733742121466 (+1 ms) 2024-12-09T11:02:01,466 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1006): Region open journal for ba25c9349712e5280183c212484d14c4: Running coprocessor pre-open hook at 1733742121455Writing region info on filesystem at 1733742121455Initializing all the Stores at 1733742121456 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742121456Cleaning up temporary data from old regions at 1733742121461 (+5 ms)Running coprocessor post-open hooks at 1733742121466 (+5 ms)Region opened successfully at 1733742121466 2024-12-09T11:02:01,467 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5., pid=176, masterSystemTime=1733742121451 2024-12-09T11:02:01,467 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4., pid=175, masterSystemTime=1733742121450 2024-12-09T11:02:01,469 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 2024-12-09T11:02:01,469 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 
2024-12-09T11:02:01,470 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=174 updating hbase:meta row=ba25c9349712e5280183c212484d14c4, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:02:01,470 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:01,470 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:01,471 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=173 updating hbase:meta row=7998226f5ba8f3ecc1397ce969d3d6c5, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:02:01,473 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=175, ppid=174, state=RUNNABLE, hasLock=false; OpenRegionProcedure ba25c9349712e5280183c212484d14c4, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T11:02:01,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=176, ppid=173, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:02:01,476 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=175, resume processing ppid=174 2024-12-09T11:02:01,477 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; OpenRegionProcedure ba25c9349712e5280183c212484d14c4, server=3469f9ca0af3,33293,1733741767044 in 176 msec 2024-12-09T11:02:01,477 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=176, resume processing ppid=173 2024-12-09T11:02:01,477 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=173, state=SUCCESS, hasLock=false; OpenRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5, server=3469f9ca0af3,39691,1733741766880 in 176 msec 2024-12-09T11:02:01,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, ppid=172, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ba25c9349712e5280183c212484d14c4, ASSIGN in 337 msec 2024-12-09T11:02:01,480 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=172 2024-12-09T11:02:01,480 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=172, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=7998226f5ba8f3ecc1397ce969d3d6c5, ASSIGN in 337 msec 2024-12-09T11:02:01,480 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=172, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:02:01,480 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742121480"}]},"ts":"1733742121480"} 
2024-12-09T11:02:01,482 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-09T11:02:01,483 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=172, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:02:01,483 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-09T11:02:01,486 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-09T11:02:01,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:01,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:01,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:01,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:01,499 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:01,499 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:01,499 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:01,500 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:01,503 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 440 msec 2024-12-09T11:02:01,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=172 2024-12-09T11:02:01,692 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 
2024-12-09T11:02:01,692 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:01,695 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-09T11:02:01,695 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:01,696 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:02:01,697 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:01,702 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:01,707 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:01,709 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-09T11:02:01,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742121709 (current time:1733742121709). 
2024-12-09T11:02:01,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:02:01,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-09T11:02:01,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:02:01,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d68b1ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:01,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:01,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:01,711 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:01,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:01,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:01,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3362d0a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:01,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:01,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:01,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:01,713 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51080, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:01,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bcb82c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:01,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:01,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:01,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:01,715 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:01,717 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 2024-12-09T11:02:01,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:01,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:01,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:01,717 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T11:02:01,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@361293db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:01,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:01,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:01,718 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:01,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:01,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:01,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e8a89fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:01,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:01,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:01,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:01,719 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51098, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:01,720 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cb0ca42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:01,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:01,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:01,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:01,722 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52554, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T11:02:01,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:02:01,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:01,725 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40602, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:01,726 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 2024-12-09T11:02:01,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:01,726 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:01,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:01,726 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:02:01,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-09T11:02:01,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T11:02:01,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-09T11:02:01,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-09T11:02:01,728 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:02:01,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-09T11:02:01,729 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:02:01,731 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:02:01,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742250_1426 (size=170) 2024-12-09T11:02:01,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742250_1426 (size=170) 2024-12-09T11:02:01,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742250_1426 (size=170) 2024-12-09T11:02:01,738 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-12-09T11:02:01,738 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba25c9349712e5280183c212484d14c4}] 2024-12-09T11:02:01,739 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,739 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-09T11:02:01,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-09T11:02:01,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-09T11:02:01,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:01,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 2024-12-09T11:02:01,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for 7998226f5ba8f3ecc1397ce969d3d6c5: 2024-12-09T11:02:01,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for ba25c9349712e5280183c212484d14c4: 2024-12-09T11:02:01,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-09T11:02:01,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-09T11:02:01,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-09T11:02:01,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-09T11:02:01,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:01,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:01,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T11:02:01,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T11:02:01,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742252_1428 (size=71) 2024-12-09T11:02:01,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742251_1427 (size=71) 2024-12-09T11:02:01,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742252_1428 (size=71) 2024-12-09T11:02:01,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742252_1428 (size=71) 2024-12-09T11:02:01,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:01,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742251_1427 (size=71) 2024-12-09T11:02:01,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 
2024-12-09T11:02:01,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-09T11:02:01,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-09T11:02:01,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742251_1427 (size=71) 2024-12-09T11:02:01,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-09T11:02:01,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-09T11:02:01,898 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,898 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,899 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:01,899 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:01,900 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5 in 161 msec 2024-12-09T11:02:01,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-12-09T11:02:01,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ba25c9349712e5280183c212484d14c4 in 161 msec 2024-12-09T11:02:01,902 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:02:01,902 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:02:01,903 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T11:02:01,903 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:02:01,903 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:01,904 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-09T11:02:01,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742253_1429 (size=63) 2024-12-09T11:02:01,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742253_1429 (size=63) 2024-12-09T11:02:01,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742253_1429 (size=63) 2024-12-09T11:02:01,910 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:02:01,910 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-09T11:02:01,911 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-09T11:02:01,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742254_1430 (size=653) 2024-12-09T11:02:01,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742254_1430 (size=653) 2024-12-09T11:02:01,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742254_1430 (size=653) 2024-12-09T11:02:01,923 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:02:01,928 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:02:01,929 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-09T11:02:01,930 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:02:01,931 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-09T11:02:01,932 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 204 msec 2024-12-09T11:02:02,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-09T11:02:02,042 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-09T11:02:02,047 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39691 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:02:02,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:02:02,050 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:02,052 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-09T11:02:02,052 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 
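A minimal client-side sketch of the kind of write that produces the "writing data to region ... with WAL disabled" warnings above, assuming the cluster configuration is on the classpath; the row key and value below are placeholders, only the table and family names are taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      // Placeholder row/value; the family 'cf' matches the table created in the log.
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL trades durability for speed; edits not yet flushed are lost on a crash,
      // which is exactly what the region server warning above points out.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}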
2024-12-09T11:02:02,053 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:02:02,054 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:02,059 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:02,063 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:02,065 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-09T11:02:02,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742122065 (current time:1733742122065). 2024-12-09T11:02:02,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:02:02,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-09T11:02:02,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:02:02,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3227245, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:02,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:02,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:02,067 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:02,067 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:02,067 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:02,067 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72870277, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-09T11:02:02,067 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:02,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:02,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:02,069 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51110, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:02,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@569c1033, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:02,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:02,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:02,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:02,071 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52566, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:02,073 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 
2024-12-09T11:02:02,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:02,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:02,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:02,073 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:02:02,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35adf046, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:02,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:02,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:02,074 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:02,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:02,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:02,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40470b14, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:02,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:02,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:02,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:02,076 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51124, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:02,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b82bc91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:02,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:02,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:02,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:02,078 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52578, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:02,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:02:02,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:02,081 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40618, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:02,082 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 
2024-12-09T11:02:02,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:02,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:02,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:02,082 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:02:02,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-09T11:02:02,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
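A minimal sketch of how a client triggers a FLUSH-type snapshot like the request logged above, assuming the cluster configuration is on the classpath; the snapshot and table names are taken from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master's SnapshotProcedure (SNAPSHOT_PREPARE through
      // SNAPSHOT_POST_OPERATION, as traced above for pid=180) completes.
      admin.snapshot("snaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH);
    }
  }
}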
2024-12-09T11:02:02,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-09T11:02:02,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-12-09T11:02:02,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-09T11:02:02,084 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:02:02,085 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:02:02,087 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:02:02,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742255_1431 (size=165) 2024-12-09T11:02:02,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742255_1431 (size=165) 2024-12-09T11:02:02,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742255_1431 (size=165) 2024-12-09T11:02:02,094 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:02:02,094 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5}, {pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba25c9349712e5280183c212484d14c4}] 2024-12-09T11:02:02,095 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:02,095 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:02,192 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-09T11:02:02,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=182 2024-12-09T11:02:02,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=181 2024-12-09T11:02:02,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:02,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 2024-12-09T11:02:02,247 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(2902): Flushing 7998226f5ba8f3ecc1397ce969d3d6c5 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-09T11:02:02,247 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(2902): Flushing ba25c9349712e5280183c212484d14c4 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-09T11:02:02,264 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412097e52b617a1e94ea8a50bbad69ac71e8b_7998226f5ba8f3ecc1397ce969d3d6c5 is 71, key is 055a62c2ebde4b2c1eefa144c36c61db/cf:q/1733742122047/Put/seqid=0 2024-12-09T11:02:02,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241209761c49b5b488450f910f4046e5f8460c_ba25c9349712e5280183c212484d14c4 is 71, key is 13a2593ee4553d1f8e6999756420474c/cf:q/1733742122049/Put/seqid=0 2024-12-09T11:02:02,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742256_1432 (size=5172) 2024-12-09T11:02:02,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742256_1432 (size=5172) 2024-12-09T11:02:02,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742256_1432 (size=5172) 2024-12-09T11:02:02,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742257_1433 (size=8101) 2024-12-09T11:02:02,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742257_1433 (size=8101) 2024-12-09T11:02:02,270 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742257_1433 (size=8101) 2024-12-09T11:02:02,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:02,274 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412097e52b617a1e94ea8a50bbad69ac71e8b_7998226f5ba8f3ecc1397ce969d3d6c5 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412097e52b617a1e94ea8a50bbad69ac71e8b_7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:02,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/.tmp/cf/e73952d29b8c44c2ba5399bd1c1315b8, store: [table=testtb-testExportExpiredSnapshot family=cf region=7998226f5ba8f3ecc1397ce969d3d6c5] 2024-12-09T11:02:02,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/.tmp/cf/e73952d29b8c44c2ba5399bd1c1315b8 is 209, key is 015eb15abda6fc3fd27d9c0cba7e1936a/cf:q/1733742122047/Put/seqid=0 2024-12-09T11:02:02,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742258_1434 (size=6123) 2024-12-09T11:02:02,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742258_1434 (size=6123) 2024-12-09T11:02:02,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742258_1434 (size=6123) 2024-12-09T11:02:02,281 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/.tmp/cf/e73952d29b8c44c2ba5399bd1c1315b8 2024-12-09T11:02:02,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/.tmp/cf/e73952d29b8c44c2ba5399bd1c1315b8 as 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/cf/e73952d29b8c44c2ba5399bd1c1315b8 2024-12-09T11:02:02,291 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/cf/e73952d29b8c44c2ba5399bd1c1315b8, entries=4, sequenceid=6, filesize=6.0 K 2024-12-09T11:02:02,292 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 7998226f5ba8f3ecc1397ce969d3d6c5 in 45ms, sequenceid=6, compaction requested=false 2024-12-09T11:02:02,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-09T11:02:02,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(2603): Flush status journal for 7998226f5ba8f3ecc1397ce969d3d6c5: 2024-12-09T11:02:02,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. for snaptb0-testExportExpiredSnapshot completed. 2024-12-09T11:02:02,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-09T11:02:02,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:02,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/cf/e73952d29b8c44c2ba5399bd1c1315b8] hfiles 2024-12-09T11:02:02,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/cf/e73952d29b8c44c2ba5399bd1c1315b8 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-09T11:02:02,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742259_1435 (size=110) 2024-12-09T11:02:02,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742259_1435 (size=110) 2024-12-09T11:02:02,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742259_1435 (size=110) 2024-12-09T11:02:02,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 
2024-12-09T11:02:02,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=181 2024-12-09T11:02:02,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=181 2024-12-09T11:02:02,305 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:02,305 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:02,307 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5 in 212 msec 2024-12-09T11:02:02,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-09T11:02:02,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:02,675 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241209761c49b5b488450f910f4046e5f8460c_ba25c9349712e5280183c212484d14c4 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241209761c49b5b488450f910f4046e5f8460c_ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:02,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/.tmp/cf/7ec9bf9879634c0388afa9e8fd230600, store: [table=testtb-testExportExpiredSnapshot family=cf region=ba25c9349712e5280183c212484d14c4] 2024-12-09T11:02:02,677 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/.tmp/cf/7ec9bf9879634c0388afa9e8fd230600 is 209, key is 14ed9520c126bf7e3367c508af10ce3e3/cf:q/1733742122049/Put/seqid=0 2024-12-09T11:02:02,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742260_1436 (size=14792) 2024-12-09T11:02:02,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742260_1436 (size=14792) 2024-12-09T11:02:02,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45147 is added to blk_1073742260_1436 (size=14792) 2024-12-09T11:02:02,685 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/.tmp/cf/7ec9bf9879634c0388afa9e8fd230600 2024-12-09T11:02:02,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/.tmp/cf/7ec9bf9879634c0388afa9e8fd230600 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/cf/7ec9bf9879634c0388afa9e8fd230600 2024-12-09T11:02:02,694 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/cf/7ec9bf9879634c0388afa9e8fd230600, entries=46, sequenceid=6, filesize=14.4 K 2024-12-09T11:02:02,695 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for ba25c9349712e5280183c212484d14c4 in 449ms, sequenceid=6, compaction requested=false 2024-12-09T11:02:02,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(2603): Flush status journal for ba25c9349712e5280183c212484d14c4: 2024-12-09T11:02:02,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. for snaptb0-testExportExpiredSnapshot completed. 2024-12-09T11:02:02,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-09T11:02:02,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:02,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/cf/7ec9bf9879634c0388afa9e8fd230600] hfiles 2024-12-09T11:02:02,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/cf/7ec9bf9879634c0388afa9e8fd230600 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-09T11:02:02,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742261_1437 (size=110) 2024-12-09T11:02:02,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742261_1437 (size=110) 2024-12-09T11:02:02,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742261_1437 (size=110) 2024-12-09T11:02:02,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 
2024-12-09T11:02:02,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-12-09T11:02:02,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=182 2024-12-09T11:02:02,704 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:02,704 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:02,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=182, resume processing ppid=180 2024-12-09T11:02:02,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ba25c9349712e5280183c212484d14c4 in 611 msec 2024-12-09T11:02:02,707 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:02:02,707 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:02:02,708 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T11:02:02,708 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:02:02,708 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:02,709 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241209761c49b5b488450f910f4046e5f8460c_ba25c9349712e5280183c212484d14c4, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412097e52b617a1e94ea8a50bbad69ac71e8b_7998226f5ba8f3ecc1397ce969d3d6c5] hfiles 2024-12-09T11:02:02,709 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241209761c49b5b488450f910f4046e5f8460c_ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:02,709 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412097e52b617a1e94ea8a50bbad69ac71e8b_7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:02,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-09T11:02:02,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742262_1438 (size=294) 2024-12-09T11:02:02,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742262_1438 (size=294) 2024-12-09T11:02:02,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742262_1438 (size=294) 2024-12-09T11:02:02,716 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:02:02,716 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-09T11:02:02,717 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-09T11:02:02,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742263_1439 (size=963) 2024-12-09T11:02:02,727 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742263_1439 (size=963) 2024-12-09T11:02:02,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742263_1439 (size=963) 2024-12-09T11:02:02,733 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:02:02,739 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:02:02,739 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-09T11:02:02,740 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:02:02,740 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-12-09T11:02:02,741 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 657 msec 2024-12-09T11:02:03,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-09T11:02:03,222 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-09T11:02:03,224 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:02:03,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=183, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure 
table=testExportExpiredSnapshot 2024-12-09T11:02:03,226 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=183, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:02:03,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 183 2024-12-09T11:02:03,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-12-09T11:02:03,226 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=183, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:02:03,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742264_1440 (size=436) 2024-12-09T11:02:03,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742264_1440 (size=436) 2024-12-09T11:02:03,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742264_1440 (size=436) 2024-12-09T11:02:03,236 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cca3861865154a4d1c7857f88e2ede7a, NAME => 'testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:03,237 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f33da0e723b1140f1a0ea77cd0d168fd, NAME => 'testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:03,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742265_1441 (size=61) 2024-12-09T11:02:03,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 
is added to blk_1073742265_1441 (size=61) 2024-12-09T11:02:03,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742265_1441 (size=61) 2024-12-09T11:02:03,243 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:03,243 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing cca3861865154a4d1c7857f88e2ede7a, disabling compactions & flushes 2024-12-09T11:02:03,243 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:02:03,243 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:02:03,243 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. after waiting 0 ms 2024-12-09T11:02:03,243 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:02:03,243 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:02:03,243 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for cca3861865154a4d1c7857f88e2ede7a: Waiting for close lock at 1733742123243Disabling compacts and flushes for region at 1733742123243Disabling writes for close at 1733742123243Writing region close event to WAL at 1733742123243Closed at 1733742123243 2024-12-09T11:02:03,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742266_1442 (size=61) 2024-12-09T11:02:03,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742266_1442 (size=61) 2024-12-09T11:02:03,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742266_1442 (size=61) 2024-12-09T11:02:03,247 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:03,247 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing f33da0e723b1140f1a0ea77cd0d168fd, disabling compactions & flushes 2024-12-09T11:02:03,247 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 
2024-12-09T11:02:03,248 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 2024-12-09T11:02:03,248 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. after waiting 0 ms 2024-12-09T11:02:03,248 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 2024-12-09T11:02:03,248 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 2024-12-09T11:02:03,248 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for f33da0e723b1140f1a0ea77cd0d168fd: Waiting for close lock at 1733742123247Disabling compacts and flushes for region at 1733742123247Disabling writes for close at 1733742123248 (+1 ms)Writing region close event to WAL at 1733742123248Closed at 1733742123248 2024-12-09T11:02:03,249 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=183, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:02:03,249 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733742123249"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742123249"}]},"ts":"1733742123249"} 2024-12-09T11:02:03,249 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733742123249"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742123249"}]},"ts":"1733742123249"} 2024-12-09T11:02:03,251 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
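[editor's illustration] The entries above show CreateTableProcedure pid=183 writing the filesystem layout for a two-region table split at row key '1', with a single MOB-enabled column family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'). The test's own setup code is not part of this log; purely as a hedged sketch, an equivalent client-side request through the public Admin API might look like the following. The connection handling, class name and split key are assumptions that mirror the descriptor printed above.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumed: cluster settings come from hbase-site.xml on the classpath.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testExportExpiredSnapshot");
      // Column family 'cf' mirrors the descriptor in the log:
      // IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'.
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build())
          .build();
      // A single split key '1' yields the two regions seen above: ('' - '1') and ('1' - '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```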
2024-12-09T11:02:03,252 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=183, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:02:03,252 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742123252"}]},"ts":"1733742123252"} 2024-12-09T11:02:03,255 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-09T11:02:03,255 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T11:02:03,257 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:02:03,257 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T11:02:03,257 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T11:02:03,257 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:02:03,257 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:02:03,257 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T11:02:03,257 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T11:02:03,257 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:02:03,257 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T11:02:03,257 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T11:02:03,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=cca3861865154a4d1c7857f88e2ede7a, ASSIGN}, {pid=185, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f33da0e723b1140f1a0ea77cd0d168fd, ASSIGN}] 2024-12-09T11:02:03,258 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f33da0e723b1140f1a0ea77cd0d168fd, ASSIGN 2024-12-09T11:02:03,258 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=cca3861865154a4d1c7857f88e2ede7a, ASSIGN 2024-12-09T11:02:03,259 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=184, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=cca3861865154a4d1c7857f88e2ede7a, ASSIGN; state=OFFLINE, location=3469f9ca0af3,39691,1733741766880; forceNewPlan=false, retain=false 2024-12-09T11:02:03,259 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=185, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f33da0e723b1140f1a0ea77cd0d168fd, ASSIGN; state=OFFLINE, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T11:02:03,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-12-09T11:02:03,409 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-09T11:02:03,410 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=184 updating hbase:meta row=cca3861865154a4d1c7857f88e2ede7a, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:02:03,410 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=185 updating hbase:meta row=f33da0e723b1140f1a0ea77cd0d168fd, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:02:03,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=cca3861865154a4d1c7857f88e2ede7a, ASSIGN because future has completed 2024-12-09T11:02:03,412 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=184, state=RUNNABLE, hasLock=false; OpenRegionProcedure cca3861865154a4d1c7857f88e2ede7a, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:02:03,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=185, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f33da0e723b1140f1a0ea77cd0d168fd, ASSIGN because future has completed 2024-12-09T11:02:03,413 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=187, ppid=185, state=RUNNABLE, hasLock=false; OpenRegionProcedure f33da0e723b1140f1a0ea77cd0d168fd, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T11:02:03,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-12-09T11:02:03,567 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:02:03,567 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 
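[editor's illustration] The repeated "Checking to see if procedure is done pid=183" calls above are the master answering a client that is blocking on its create-table request while the assignment subprocedures run. As a rough client-side analogue only (not code from this test), the same wait can be expressed against the public Admin API; the helper name, timeout and polling interval below are arbitrary assumptions.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Hypothetical helper: block until the master reports the table's regions as
// available, which is what the repeated procedure-done checks in the log
// correspond to for a synchronous Admin.createTable() request.
final class WaitForTableSketch {
  static void await(Admin admin, TableName tn, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.isTableAvailable(tn)) {   // asks the master, like the RPCs above
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("table " + tn + " not available in time");
      }
      Thread.sleep(200);                    // polling interval is an arbitrary choice here
    }
  }
}
```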
2024-12-09T11:02:03,567 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(7752): Opening region: {ENCODED => cca3861865154a4d1c7857f88e2ede7a, NAME => 'testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T11:02:03,567 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(7752): Opening region: {ENCODED => f33da0e723b1140f1a0ea77cd0d168fd, NAME => 'testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T11:02:03,567 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. service=AccessControlService 2024-12-09T11:02:03,567 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. service=AccessControlService 2024-12-09T11:02:03,567 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T11:02:03,567 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T11:02:03,567 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:03,567 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:03,567 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:03,567 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(7794): checking encryption for cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:03,567 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(7797): checking classloading for cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:03,567 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:03,567 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(7794): checking encryption for 
f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:03,568 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(7797): checking classloading for f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:03,569 INFO [StoreOpener-cca3861865154a4d1c7857f88e2ede7a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:03,569 INFO [StoreOpener-f33da0e723b1140f1a0ea77cd0d168fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:03,570 INFO [StoreOpener-cca3861865154a4d1c7857f88e2ede7a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cca3861865154a4d1c7857f88e2ede7a columnFamilyName cf 2024-12-09T11:02:03,570 INFO [StoreOpener-f33da0e723b1140f1a0ea77cd0d168fd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f33da0e723b1140f1a0ea77cd0d168fd columnFamilyName cf 2024-12-09T11:02:03,571 DEBUG [StoreOpener-cca3861865154a4d1c7857f88e2ede7a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:03,571 DEBUG [StoreOpener-f33da0e723b1140f1a0ea77cd0d168fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:03,571 INFO [StoreOpener-cca3861865154a4d1c7857f88e2ede7a-1 {}] regionserver.HStore(327): Store=cca3861865154a4d1c7857f88e2ede7a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:02:03,571 INFO [StoreOpener-f33da0e723b1140f1a0ea77cd0d168fd-1 {}] regionserver.HStore(327): Store=f33da0e723b1140f1a0ea77cd0d168fd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:02:03,571 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1038): replaying wal for cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:03,571 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1038): replaying wal for f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:03,572 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:03,572 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:03,572 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:03,572 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:03,573 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1048): stopping wal replay for f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:03,573 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1048): stopping wal replay for cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:03,573 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1060): Cleaning up temporary data for cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:03,573 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1060): Cleaning up temporary data for f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:03,575 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1093): writing seq id for f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:03,575 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1093): writing seq id for cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:03,576 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/f33da0e723b1140f1a0ea77cd0d168fd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:02:03,577 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/cca3861865154a4d1c7857f88e2ede7a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:02:03,577 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1114): Opened f33da0e723b1140f1a0ea77cd0d168fd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64455791, jitterRate=-0.03953386843204498}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:02:03,577 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:03,577 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1114): Opened cca3861865154a4d1c7857f88e2ede7a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71096668, jitterRate=0.05942291021347046}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:02:03,577 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:03,577 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1006): Region open journal for cca3861865154a4d1c7857f88e2ede7a: Running coprocessor pre-open hook at 1733742123567Writing region info on filesystem at 1733742123568 (+1 ms)Initializing all the Stores at 1733742123568Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742123568Cleaning up temporary data from old regions at 1733742123573 (+5 ms)Running coprocessor post-open hooks at 1733742123577 (+4 ms)Region opened successfully at 1733742123577 2024-12-09T11:02:03,577 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1006): Region open journal for f33da0e723b1140f1a0ea77cd0d168fd: Running coprocessor pre-open hook at 1733742123568Writing region info on filesystem at 1733742123568Initializing all the Stores at 1733742123568Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742123568Cleaning up temporary data from old regions at 1733742123573 (+5 ms)Running coprocessor post-open hooks at 1733742123577 (+4 ms)Region opened successfully at 1733742123577 2024-12-09T11:02:03,578 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd., pid=187, masterSystemTime=1733742123564 2024-12-09T11:02:03,578 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a., pid=186, masterSystemTime=1733742123564 2024-12-09T11:02:03,580 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:02:03,580 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:02:03,581 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=184 updating hbase:meta row=cca3861865154a4d1c7857f88e2ede7a, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:02:03,581 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 2024-12-09T11:02:03,581 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 2024-12-09T11:02:03,582 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=185 updating hbase:meta row=f33da0e723b1140f1a0ea77cd0d168fd, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:02:03,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=186, ppid=184, state=RUNNABLE, hasLock=false; OpenRegionProcedure cca3861865154a4d1c7857f88e2ede7a, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:02:03,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=187, ppid=185, state=RUNNABLE, hasLock=false; OpenRegionProcedure f33da0e723b1140f1a0ea77cd0d168fd, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T11:02:03,586 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=184 2024-12-09T11:02:03,586 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=184, state=SUCCESS, hasLock=false; OpenRegionProcedure cca3861865154a4d1c7857f88e2ede7a, server=3469f9ca0af3,39691,1733741766880 in 172 msec 2024-12-09T11:02:03,587 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=185 2024-12-09T11:02:03,587 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; OpenRegionProcedure f33da0e723b1140f1a0ea77cd0d168fd, server=3469f9ca0af3,33293,1733741767044 in 171 msec 2024-12-09T11:02:03,587 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=183, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=cca3861865154a4d1c7857f88e2ede7a, ASSIGN in 329 msec 2024-12-09T11:02:03,589 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=185, resume processing ppid=183 2024-12-09T11:02:03,589 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, ppid=183, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f33da0e723b1140f1a0ea77cd0d168fd, ASSIGN in 330 msec 2024-12-09T11:02:03,589 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=183, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:02:03,590 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742123589"}]},"ts":"1733742123589"} 2024-12-09T11:02:03,591 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-09T11:02:03,592 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=183, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:02:03,592 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-09T11:02:03,594 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-09T11:02:03,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:03,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:03,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:03,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:03,599 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:03,599 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:03,599 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:03,599 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:03,600 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:03,600 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:03,600 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:03,600 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:03,600 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 374 msec 2024-12-09T11:02:03,735 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-09T11:02:03,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-12-09T11:02:03,852 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-12-09T11:02:03,852 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:03,855 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-09T11:02:03,855 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 
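[editor's illustration] Once the table reaches CREATE_TABLE_POST_OPERATION, the master writes the owner ACL entry "jenkins: RWXCA" into hbase:acl and the change fans out to every region server through the /hbase/acl znode, which is what the ZKPermissionWatcher cache updates above record. As an illustration only, an explicit grant with the same effect could be issued through AccessControlClient; the wrapper class below is an assumption, while the grant call itself is the public API.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

// Illustrative only: an explicit grant equivalent to the owner entry the master
// wrote automatically ("jenkins: RWXCA"). AccessControlClient updates hbase:acl,
// and the ZKPermissionWatcher caches pick the change up via /hbase/acl.
final class AclGrantSketch {
  static void grantOwnerLikePermissions(Connection conn) throws Throwable {
    AccessControlClient.grant(conn,
        TableName.valueOf("testExportExpiredSnapshot"),
        "jenkins",
        null,   // all column families
        null,   // all qualifiers
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}
```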
2024-12-09T11:02:03,855 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:02:03,857 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:03,862 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:03,871 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:03,882 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39691 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:02:03,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:02:03,885 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:03,888 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-09T11:02:03,888 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 
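[editor's illustration] The two HRegion(8528) warnings above ("writing data ... with WAL disabled. Data may be lost in the event of a crash.") come from puts that explicitly skip the write-ahead log. A minimal sketch of such a write is shown below, assuming an open Connection; the row key and value are placeholders, not data from the test.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// A put that asks for Durability.SKIP_WAL, trading crash safety for speed;
// the region server responds with the data-loss warning seen in the log.
final class SkipWalPutSketch {
  static void putWithoutWal(Connection conn) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("example-row"));                       // placeholder row
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("example-value"));
      put.setDurability(Durability.SKIP_WAL);                                // skip the WAL
      table.put(put);
    }
  }
}
```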
2024-12-09T11:02:03,888 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:02:03,890 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:03,896 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T11:02:03,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-09T11:02:03,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-09T11:02:03,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:02:03,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e003a3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:03,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:03,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:03,906 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:03,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:03,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:03,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44128cf2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:03,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:03,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:03,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:03,907 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51142, version=3.0.0-beta-2-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:03,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35d7a3fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:03,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:03,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:03,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:03,910 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52588, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:03,912 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 2024-12-09T11:02:03,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:03,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:03,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:03,912 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
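[editor's illustration] The snapshot request logged earlier as "{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }" corresponds, on the client side, to an Admin.snapshot() call. The sketch below is illustrative only: for an enabled table this overload takes a FLUSH-type snapshot, while the ttl=10 property is supplied separately by the test harness and is deliberately not reproduced here rather than guessed at.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Client-side shape of the snapshot request seen in the log. For an enabled
// table this takes a FLUSH-type snapshot; the short TTL used by the test is
// passed as an extra snapshot property and is omitted here.
final class SnapshotRequestSketch {
  static void takeSnapshot(Admin admin) throws Exception {
    admin.snapshot("snapshot-testExportExpiredSnapshot",
        TableName.valueOf("testExportExpiredSnapshot"));
  }
}
```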
2024-12-09T11:02:03,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@656d1cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:03,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:03,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:03,918 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:03,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:03,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:03,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32847a28, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:03,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:03,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:03,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:03,919 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51150, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:03,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65c9c62a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:03,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:03,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:03,921 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:03,922 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52596, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T11:02:03,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:02:03,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:03,925 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40634, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:03,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 2024-12-09T11:02:03,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:03,926 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:03,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:03,926 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:02:03,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-09T11:02:03,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T11:02:03,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-09T11:02:03,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 188 2024-12-09T11:02:03,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-09T11:02:03,929 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:02:03,930 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:02:03,932 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:02:03,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742267_1443 (size=152) 2024-12-09T11:02:03,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742267_1443 (size=152) 2024-12-09T11:02:03,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742267_1443 (size=152) 2024-12-09T11:02:03,939 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:02:03,939 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cca3861865154a4d1c7857f88e2ede7a}, {pid=190, ppid=188, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f33da0e723b1140f1a0ea77cd0d168fd}] 2024-12-09T11:02:03,940 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=189, ppid=188, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:03,940 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=188, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:04,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-09T11:02:04,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=189 2024-12-09T11:02:04,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=190 2024-12-09T11:02:04,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:02:04,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 
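[editor's illustration] Because the snapshot type is FLUSH, each SnapshotRegionProcedure dispatched above first flushes the region's memstore, which is what the HRegion(2902) "Flushing ..." lines that follow show. That step is server-side machinery, but a comparable flush can be forced from a client; the sketch below is only an illustration of that public API, not part of the snapshot path itself.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Force a memstore flush for every region of the table, analogous to the
// per-region flushes the FLUSH-type snapshot triggers in the log.
final class FlushSketch {
  static void flushTable(Admin admin) throws IOException {
    admin.flush(TableName.valueOf("testExportExpiredSnapshot"));
  }
}
```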
2024-12-09T11:02:04,092 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2902): Flushing cca3861865154a4d1c7857f88e2ede7a 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-09T11:02:04,092 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2902): Flushing f33da0e723b1140f1a0ea77cd0d168fd 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-09T11:02:04,110 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209a2f739dddf014c5b8e21348077944469_cca3861865154a4d1c7857f88e2ede7a is 71, key is 0038254e05edcbd70cdd8cde36992802/cf:q/1733742123882/Put/seqid=0 2024-12-09T11:02:04,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241209f558fad0669040288a73ee942b9c4381_f33da0e723b1140f1a0ea77cd0d168fd is 71, key is 13f58ed0c51772bf18332660c66e1ddf/cf:q/1733742123884/Put/seqid=0 2024-12-09T11:02:04,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742268_1444 (size=5312) 2024-12-09T11:02:04,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742268_1444 (size=5312) 2024-12-09T11:02:04,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742268_1444 (size=5312) 2024-12-09T11:02:04,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742269_1445 (size=7961) 2024-12-09T11:02:04,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742269_1445 (size=7961) 2024-12-09T11:02:04,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742269_1445 (size=7961) 2024-12-09T11:02:04,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:04,128 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241209f558fad0669040288a73ee942b9c4381_f33da0e723b1140f1a0ea77cd0d168fd to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241209f558fad0669040288a73ee942b9c4381_f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:04,129 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, 
pid=190}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/f33da0e723b1140f1a0ea77cd0d168fd/.tmp/cf/87e337655bb34bfe9f9b5b92e6c77b15, store: [table=testExportExpiredSnapshot family=cf region=f33da0e723b1140f1a0ea77cd0d168fd] 2024-12-09T11:02:04,129 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/f33da0e723b1140f1a0ea77cd0d168fd/.tmp/cf/87e337655bb34bfe9f9b5b92e6c77b15 is 202, key is 1ee3920c04b1b81502f675f593ed0849f/cf:q/1733742123884/Put/seqid=0 2024-12-09T11:02:04,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742270_1446 (size=14067) 2024-12-09T11:02:04,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742270_1446 (size=14067) 2024-12-09T11:02:04,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742270_1446 (size=14067) 2024-12-09T11:02:04,135 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/f33da0e723b1140f1a0ea77cd0d168fd/.tmp/cf/87e337655bb34bfe9f9b5b92e6c77b15 2024-12-09T11:02:04,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/f33da0e723b1140f1a0ea77cd0d168fd/.tmp/cf/87e337655bb34bfe9f9b5b92e6c77b15 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/f33da0e723b1140f1a0ea77cd0d168fd/cf/87e337655bb34bfe9f9b5b92e6c77b15 2024-12-09T11:02:04,145 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/f33da0e723b1140f1a0ea77cd0d168fd/cf/87e337655bb34bfe9f9b5b92e6c77b15, entries=44, sequenceid=5, filesize=13.7 K 2024-12-09T11:02:04,146 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for f33da0e723b1140f1a0ea77cd0d168fd in 53ms, sequenceid=5, compaction requested=false 2024-12-09T11:02:04,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2603): Flush status journal for f33da0e723b1140f1a0ea77cd0d168fd: 2024-12-09T11:02:04,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. for snapshot-testExportExpiredSnapshot completed. 2024-12-09T11:02:04,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-09T11:02:04,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:04,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/f33da0e723b1140f1a0ea77cd0d168fd/cf/87e337655bb34bfe9f9b5b92e6c77b15] hfiles 2024-12-09T11:02:04,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/f33da0e723b1140f1a0ea77cd0d168fd/cf/87e337655bb34bfe9f9b5b92e6c77b15 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-09T11:02:04,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742271_1447 (size=103) 2024-12-09T11:02:04,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742271_1447 (size=103) 2024-12-09T11:02:04,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742271_1447 (size=103) 2024-12-09T11:02:04,156 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 
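Because the column family is MOB-enabled, the flush above goes through DefaultMobStoreFlusher: the large cells land in a separate MOB hfile under /mobdir, the regular store file is committed under the region directory, and the snapshot manifest then records references to both. A hedged sketch of how a MOB-enabled family like this 'cf' is typically declared follows; the threshold value is illustrative, not taken from the test.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Cells in 'cf' above the MOB threshold are flushed into separate MOB
      // hfiles (the /mobdir paths in the log) instead of the region's normal
      // store files.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(100L) // bytes; illustrative value only
              .build())
          .build());
    }
  }
}
```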
2024-12-09T11:02:04,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-12-09T11:02:04,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=190 2024-12-09T11:02:04,157 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:04,157 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=188, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:04,161 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=188, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f33da0e723b1140f1a0ea77cd0d168fd in 220 msec 2024-12-09T11:02:04,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-09T11:02:04,502 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:02:04,517 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:04,522 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209a2f739dddf014c5b8e21348077944469_cca3861865154a4d1c7857f88e2ede7a to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241209a2f739dddf014c5b8e21348077944469_cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:04,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/cca3861865154a4d1c7857f88e2ede7a/.tmp/cf/b20ea28fbc5340118bad1cd2bc24f5da, store: [table=testExportExpiredSnapshot family=cf region=cca3861865154a4d1c7857f88e2ede7a] 2024-12-09T11:02:04,523 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/cca3861865154a4d1c7857f88e2ede7a/.tmp/cf/b20ea28fbc5340118bad1cd2bc24f5da is 202, key is 0e4828b67575b69b64523fcb6071f620c/cf:q/1733742123882/Put/seqid=0 2024-12-09T11:02:04,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742272_1448 (size=6484) 2024-12-09T11:02:04,528 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742272_1448 (size=6484) 2024-12-09T11:02:04,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742272_1448 (size=6484) 2024-12-09T11:02:04,528 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/cca3861865154a4d1c7857f88e2ede7a/.tmp/cf/b20ea28fbc5340118bad1cd2bc24f5da 2024-12-09T11:02:04,532 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/cca3861865154a4d1c7857f88e2ede7a/.tmp/cf/b20ea28fbc5340118bad1cd2bc24f5da as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/cca3861865154a4d1c7857f88e2ede7a/cf/b20ea28fbc5340118bad1cd2bc24f5da 2024-12-09T11:02:04,536 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/cca3861865154a4d1c7857f88e2ede7a/cf/b20ea28fbc5340118bad1cd2bc24f5da, entries=6, sequenceid=5, filesize=6.3 K 2024-12-09T11:02:04,537 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for cca3861865154a4d1c7857f88e2ede7a in 445ms, sequenceid=5, compaction requested=false 2024-12-09T11:02:04,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2603): Flush status journal for cca3861865154a4d1c7857f88e2ede7a: 2024-12-09T11:02:04,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. for snapshot-testExportExpiredSnapshot completed. 2024-12-09T11:02:04,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-09T11:02:04,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:04,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/cca3861865154a4d1c7857f88e2ede7a/cf/b20ea28fbc5340118bad1cd2bc24f5da] hfiles 2024-12-09T11:02:04,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/cca3861865154a4d1c7857f88e2ede7a/cf/b20ea28fbc5340118bad1cd2bc24f5da for snapshot=snapshot-testExportExpiredSnapshot 2024-12-09T11:02:04,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742273_1449 (size=103) 2024-12-09T11:02:04,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742273_1449 (size=103) 2024-12-09T11:02:04,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742273_1449 (size=103) 2024-12-09T11:02:04,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 
2024-12-09T11:02:04,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=189 2024-12-09T11:02:04,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=189 2024-12-09T11:02:04,544 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:04,544 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=189, ppid=188, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:04,546 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-09T11:02:04,546 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cca3861865154a4d1c7857f88e2ede7a in 605 msec 2024-12-09T11:02:04,546 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:02:04,547 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:02:04,547 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T11:02:04,547 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:02:04,548 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:04,549 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241209f558fad0669040288a73ee942b9c4381_f33da0e723b1140f1a0ea77cd0d168fd, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241209a2f739dddf014c5b8e21348077944469_cca3861865154a4d1c7857f88e2ede7a] hfiles 2024-12-09T11:02:04,549 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241209f558fad0669040288a73ee942b9c4381_f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:02:04,549 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241209a2f739dddf014c5b8e21348077944469_cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:02:04,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-09T11:02:04,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742274_1450 (size=287) 2024-12-09T11:02:04,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742274_1450 (size=287) 2024-12-09T11:02:04,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742274_1450 (size=287) 2024-12-09T11:02:04,556 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:02:04,556 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-09T11:02:04,557 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-09T11:02:04,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742275_1451 (size=935) 2024-12-09T11:02:04,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742275_1451 (size=935) 2024-12-09T11:02:04,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742275_1451 (size=935) 2024-12-09T11:02:04,575 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:02:04,580 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:02:04,580 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-09T11:02:04,581 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:02:04,581 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 188 2024-12-09T11:02:04,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 654 msec 2024-12-09T11:02:04,818 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0007_000001 (auth:SIMPLE) from 127.0.0.1:36356 2024-12-09T11:02:04,831 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_1/usercache/jenkins/appcache/application_1733741775522_0007/container_1733741775522_0007_01_000001/launch_container.sh] 2024-12-09T11:02:04,831 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_1/usercache/jenkins/appcache/application_1733741775522_0007/container_1733741775522_0007_01_000001/container_tokens] 2024-12-09T11:02:04,831 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_1/usercache/jenkins/appcache/application_1733741775522_0007/container_1733741775522_0007_01_000001/sysfs] 2024-12-09T11:02:05,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-09T11:02:05,062 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-09T11:02:05,812 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:02:06,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-09T11:02:06,469 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-09T11:02:06,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-09T11:02:06,470 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-09T11:02:06,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T11:02:06,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T11:02:09,106 WARN [regionserver/3469f9ca0af3:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 5, running: 1 2024-12-09T11:02:11,408 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:02:11,408 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region f33da0e723b1140f1a0ea77cd0d168fd changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:02:11,408 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ba25c9349712e5280183c212484d14c4 changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:02:11,408 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region cca3861865154a4d1c7857f88e2ede7a changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:02:11,408 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7998226f5ba8f3ecc1397ce969d3d6c5 changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:02:11,973 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:02:15,076 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742135076 2024-12-09T11:02:15,076 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35869, tgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742135076, rawTgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742135076, srcFsUri=hdfs://localhost:35869, srcDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:15,106 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35869, inputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:15,106 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742135076, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742135076/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-09T11:02:15,108 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T11:02:15,109 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
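The ERROR above is the expected outcome of the test: the snapshot was created with a 10-second TTL, the test waits past it, and ExportSnapshot.verifySnapshot rejects the export with SnapshotTTLExpiredException before any data is copied. The stack trace shows the export being driven through ToolRunner; a minimal sketch of that invocation is below, using the snapshot name and destination path printed in the log purely as illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Verification happens up front: roughly, if
    // creationTime + ttlSeconds * 1000 < now, the tool fails with
    // SnapshotTTLExpiredException instead of launching the copy job.
    int ret = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportExpiredSnapshot",
        "-copy-to", "hdfs://localhost:35869/user/jenkins/test-data/"
            + "68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742135076"
    });
    System.exit(ret); // non-zero here, since the TTL has already expired
  }
}
```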
2024-12-09T11:02:15,110 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=191 2024-12-09T11:02:15,112 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742135112"}]},"ts":"1733742135112"} 2024-12-09T11:02:15,114 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-09T11:02:15,114 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-09T11:02:15,114 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-09T11:02:15,116 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=7998226f5ba8f3ecc1397ce969d3d6c5, UNASSIGN}, {pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ba25c9349712e5280183c212484d14c4, UNASSIGN}] 2024-12-09T11:02:15,116 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ba25c9349712e5280183c212484d14c4, UNASSIGN 2024-12-09T11:02:15,116 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=7998226f5ba8f3ecc1397ce969d3d6c5, UNASSIGN 2024-12-09T11:02:15,117 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=ba25c9349712e5280183c212484d14c4, regionState=CLOSING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:02:15,117 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=193 updating hbase:meta row=7998226f5ba8f3ecc1397ce969d3d6c5, regionState=CLOSING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:02:15,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ba25c9349712e5280183c212484d14c4, UNASSIGN because future has completed 2024-12-09T11:02:15,119 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:02:15,119 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=195, ppid=194, state=RUNNABLE, hasLock=false; CloseRegionProcedure ba25c9349712e5280183c212484d14c4, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T11:02:15,119 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=7998226f5ba8f3ecc1397ce969d3d6c5, UNASSIGN because future has completed 2024-12-09T11:02:15,119 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:02:15,119 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=193, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:02:15,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=191 2024-12-09T11:02:15,271 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(122): Close ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:15,271 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:02:15,271 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1722): Closing ba25c9349712e5280183c212484d14c4, disabling compactions & flushes 2024-12-09T11:02:15,271 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 2024-12-09T11:02:15,271 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 2024-12-09T11:02:15,271 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. after waiting 0 ms 2024-12-09T11:02:15,271 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 
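The DISABLE operation above (pid=191) spawns a CloseTableRegionsProcedure that unassigns and closes both regions of testtb-testExportExpiredSnapshot. On the client this is one Admin call; a minimal sketch, with only the table name taken from the log:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      // Drives the DisableTableProcedure: meta is marked DISABLING, each
      // region is unassigned and closed, then meta flips to DISABLED.
      admin.disableTable(table);
      System.out.println("disabled: " + admin.isTableDisabled(table));
    }
  }
}
```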
2024-12-09T11:02:15,272 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(122): Close 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:15,272 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:02:15,272 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1722): Closing 7998226f5ba8f3ecc1397ce969d3d6c5, disabling compactions & flushes 2024-12-09T11:02:15,272 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:15,272 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:15,272 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. after waiting 0 ms 2024-12-09T11:02:15,272 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:15,275 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:02:15,276 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:02:15,276 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:02:15,276 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4. 
2024-12-09T11:02:15,276 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1676): Region close journal for ba25c9349712e5280183c212484d14c4: Waiting for close lock at 1733742135271Running coprocessor pre-close hooks at 1733742135271Disabling compacts and flushes for region at 1733742135271Disabling writes for close at 1733742135271Writing region close event to WAL at 1733742135272 (+1 ms)Running coprocessor post-close hooks at 1733742135276 (+4 ms)Closed at 1733742135276 2024-12-09T11:02:15,276 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:02:15,276 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5. 2024-12-09T11:02:15,276 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1676): Region close journal for 7998226f5ba8f3ecc1397ce969d3d6c5: Waiting for close lock at 1733742135272Running coprocessor pre-close hooks at 1733742135272Disabling compacts and flushes for region at 1733742135272Disabling writes for close at 1733742135272Writing region close event to WAL at 1733742135273 (+1 ms)Running coprocessor post-close hooks at 1733742135276 (+3 ms)Closed at 1733742135276 2024-12-09T11:02:15,278 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(157): Closed ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:15,278 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=ba25c9349712e5280183c212484d14c4, regionState=CLOSED 2024-12-09T11:02:15,278 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(157): Closed 7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:15,279 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=193 updating hbase:meta row=7998226f5ba8f3ecc1397ce969d3d6c5, regionState=CLOSED 2024-12-09T11:02:15,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=195, ppid=194, state=RUNNABLE, hasLock=false; CloseRegionProcedure ba25c9349712e5280183c212484d14c4, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T11:02:15,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=193, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:02:15,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=195, resume processing ppid=194 2024-12-09T11:02:15,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, ppid=194, state=SUCCESS, hasLock=false; CloseRegionProcedure ba25c9349712e5280183c212484d14c4, server=3469f9ca0af3,33293,1733741767044 in 162 msec 2024-12-09T11:02:15,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=193 2024-12-09T11:02:15,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=193, state=SUCCESS, 
hasLock=false; CloseRegionProcedure 7998226f5ba8f3ecc1397ce969d3d6c5, server=3469f9ca0af3,39691,1733741766880 in 163 msec 2024-12-09T11:02:15,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, ppid=192, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ba25c9349712e5280183c212484d14c4, UNASSIGN in 166 msec 2024-12-09T11:02:15,285 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=192 2024-12-09T11:02:15,285 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=192, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=7998226f5ba8f3ecc1397ce969d3d6c5, UNASSIGN in 167 msec 2024-12-09T11:02:15,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=191 2024-12-09T11:02:15,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=191, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 171 msec 2024-12-09T11:02:15,288 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742135288"}]},"ts":"1733742135288"} 2024-12-09T11:02:15,289 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-09T11:02:15,289 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-09T11:02:15,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 180 msec 2024-12-09T11:02:15,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=191 2024-12-09T11:02:15,432 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-09T11:02:15,433 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,434 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,435 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=197, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,437 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in 
hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,439 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:15,439 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:15,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,441 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/recovered.edits] 2024-12-09T11:02:15,441 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/recovered.edits] 2024-12-09T11:02:15,441 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-09T11:02:15,441 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-09T11:02:15,441 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-09T11:02:15,441 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-09T11:02:15,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, 
state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:15,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:15,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:15,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:15,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=197 2024-12-09T11:02:15,444 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:15,444 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:15,444 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:15,444 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:15,445 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/cf/7ec9bf9879634c0388afa9e8fd230600 to 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/cf/7ec9bf9879634c0388afa9e8fd230600 2024-12-09T11:02:15,446 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/cf/e73952d29b8c44c2ba5399bd1c1315b8 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/cf/e73952d29b8c44c2ba5399bd1c1315b8 2024-12-09T11:02:15,448 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4/recovered.edits/9.seqid 2024-12-09T11:02:15,448 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:15,448 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5/recovered.edits/9.seqid 2024-12-09T11:02:15,449 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportExpiredSnapshot/7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:15,449 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-09T11:02:15,449 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-09T11:02:15,450 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-12-09T11:02:15,453 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241209761c49b5b488450f910f4046e5f8460c_ba25c9349712e5280183c212484d14c4 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241209761c49b5b488450f910f4046e5f8460c_ba25c9349712e5280183c212484d14c4 2024-12-09T11:02:15,453 DEBUG 
[PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412097e52b617a1e94ea8a50bbad69ac71e8b_7998226f5ba8f3ecc1397ce969d3d6c5 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412097e52b617a1e94ea8a50bbad69ac71e8b_7998226f5ba8f3ecc1397ce969d3d6c5 2024-12-09T11:02:15,454 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-09T11:02:15,456 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=197, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,458 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-09T11:02:15,460 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-09T11:02:15,461 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=197, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,461 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-09T11:02:15,461 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742135461"}]},"ts":"9223372036854775807"} 2024-12-09T11:02:15,461 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742135461"}]},"ts":"9223372036854775807"} 2024-12-09T11:02:15,463 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T11:02:15,463 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7998226f5ba8f3ecc1397ce969d3d6c5, NAME => 'testtb-testExportExpiredSnapshot,,1733742121062.7998226f5ba8f3ecc1397ce969d3d6c5.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ba25c9349712e5280183c212484d14c4, NAME => 'testtb-testExportExpiredSnapshot,1,1733742121062.ba25c9349712e5280183c212484d14c4.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T11:02:15,463 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
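
The DeleteTableProcedure entries above (archiving the region and MOB HFiles, removing the META rows, marking the table deleted) and the snapshot deletions logged just below are the server side of an ordinary client teardown. The following is only a minimal, illustrative sketch of what such a teardown could look like from the client, assuming a cluster reachable through the usual hbase-site.xml; the table and snapshot names are taken from the log, everything else (class name, connection setup) is hypothetical and not the test's actual code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TeardownSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
      // deleteTable drives a DeleteTableProcedure like the one logged above
      // (archive HFiles, delete META rows, drop the descriptor); the table must be disabled first.
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);
        }
        admin.deleteTable(tn);
      }
      // Mirrors the three "delete name: ..." requests handled by SnapshotManager just below.
      admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
      admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
      admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
    }
  }
}
```
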
2024-12-09T11:02:15,464 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733742135463"}]},"ts":"9223372036854775807"} 2024-12-09T11:02:15,465 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-09T11:02:15,466 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=197, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,467 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 33 msec 2024-12-09T11:02:15,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=197 2024-12-09T11:02:15,553 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-12-09T11:02:15,553 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-09T11:02:15,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-09T11:02:15,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-09T11:02:15,564 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-12-09T11:02:15,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-09T11:02:15,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-09T11:02:15,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-09T11:02:15,589 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=814 (was 819), OpenFileDescriptor=813 (was 829), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=919 (was 1118), ProcessCount=14 (was 14), AvailableMemoryMB=2815 (was 3077) 2024-12-09T11:02:15,589 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-12-09T11:02:15,606 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=814, OpenFileDescriptor=813, MaxFileDescriptor=1048576, SystemLoadAverage=919, ProcessCount=14, AvailableMemoryMB=2815 2024-12-09T11:02:15,606 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-12-09T11:02:15,607 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 
'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:02:15,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T11:02:15,609 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:02:15,609 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 198 2024-12-09T11:02:15,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-12-09T11:02:15,610 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:02:15,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742276_1452 (size=448) 2024-12-09T11:02:15,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742276_1452 (size=448) 2024-12-09T11:02:15,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742276_1452 (size=448) 2024-12-09T11:02:15,618 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1f4c77cea0dcbd9f3a4c280a94cf65f7, NAME => 'testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:15,618 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => d5ba0cd50015a1edf9d0cc5f3cb70060, NAME => 'testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', 
{TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:15,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742277_1453 (size=73) 2024-12-09T11:02:15,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742277_1453 (size=73) 2024-12-09T11:02:15,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742277_1453 (size=73) 2024-12-09T11:02:15,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742278_1454 (size=73) 2024-12-09T11:02:15,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742278_1454 (size=73) 2024-12-09T11:02:15,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742278_1454 (size=73) 2024-12-09T11:02:15,626 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:15,626 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:15,626 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 1f4c77cea0dcbd9f3a4c280a94cf65f7, disabling compactions & flushes 2024-12-09T11:02:15,626 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing d5ba0cd50015a1edf9d0cc5f3cb70060, disabling compactions & flushes 2024-12-09T11:02:15,626 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:15,626 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 2024-12-09T11:02:15,626 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 
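
The CreateTableProcedure above is building testtb-testEmptyExportFileSystemState with a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and a split at row key '1', which is why two regions ('' to '1' and '1' to '') are initialized. A hedged client-side sketch that would produce an equivalent descriptor; the table, family, and split names come from the logged descriptor, the rest is illustrative rather than the test's own code.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)   // IS_MOB => 'true' in the logged descriptor
          .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell is written as a MOB
          .setMaxVersions(1)     // VERSIONS => '1'
          .build();
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
          .setColumnFamily(cf)
          .build();
      // One split key at '1' yields the two regions seen in the log: ('', '1') and ('1', '').
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```
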
2024-12-09T11:02:15,626 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:15,626 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. after waiting 0 ms 2024-12-09T11:02:15,626 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. after waiting 0 ms 2024-12-09T11:02:15,626 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 2024-12-09T11:02:15,626 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:15,626 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 2024-12-09T11:02:15,626 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:15,626 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for d5ba0cd50015a1edf9d0cc5f3cb70060: Waiting for close lock at 1733742135626Disabling compacts and flushes for region at 1733742135626Disabling writes for close at 1733742135626Writing region close event to WAL at 1733742135626Closed at 1733742135626 2024-12-09T11:02:15,626 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1f4c77cea0dcbd9f3a4c280a94cf65f7: Waiting for close lock at 1733742135626Disabling compacts and flushes for region at 1733742135626Disabling writes for close at 1733742135626Writing region close event to WAL at 1733742135626Closed at 1733742135626 2024-12-09T11:02:15,627 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:02:15,627 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733742135627"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742135627"}]},"ts":"1733742135627"} 2024-12-09T11:02:15,627 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733742135627"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742135627"}]},"ts":"1733742135627"} 2024-12-09T11:02:15,630 INFO [PEWorker-3 {}] 
hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T11:02:15,630 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:02:15,631 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742135630"}]},"ts":"1733742135630"} 2024-12-09T11:02:15,632 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-09T11:02:15,632 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T11:02:15,633 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:02:15,633 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T11:02:15,633 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T11:02:15,633 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:02:15,633 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:02:15,633 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T11:02:15,633 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T11:02:15,633 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:02:15,633 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T11:02:15,633 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T11:02:15,633 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=1f4c77cea0dcbd9f3a4c280a94cf65f7, ASSIGN}, {pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d5ba0cd50015a1edf9d0cc5f3cb70060, ASSIGN}] 2024-12-09T11:02:15,634 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d5ba0cd50015a1edf9d0cc5f3cb70060, ASSIGN 2024-12-09T11:02:15,634 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=1f4c77cea0dcbd9f3a4c280a94cf65f7, ASSIGN 2024-12-09T11:02:15,635 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=1f4c77cea0dcbd9f3a4c280a94cf65f7, ASSIGN; state=OFFLINE, location=3469f9ca0af3,39691,1733741766880; forceNewPlan=false, retain=false 2024-12-09T11:02:15,635 INFO 
[PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d5ba0cd50015a1edf9d0cc5f3cb70060, ASSIGN; state=OFFLINE, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T11:02:15,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-12-09T11:02:15,785 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-09T11:02:15,785 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=200 updating hbase:meta row=d5ba0cd50015a1edf9d0cc5f3cb70060, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:02:15,785 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=199 updating hbase:meta row=1f4c77cea0dcbd9f3a4c280a94cf65f7, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:02:15,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d5ba0cd50015a1edf9d0cc5f3cb70060, ASSIGN because future has completed 2024-12-09T11:02:15,787 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; OpenRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T11:02:15,788 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=1f4c77cea0dcbd9f3a4c280a94cf65f7, ASSIGN because future has completed 2024-12-09T11:02:15,788 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=202, ppid=199, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:02:15,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-12-09T11:02:15,942 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 2024-12-09T11:02:15,942 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 
2024-12-09T11:02:15,942 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7752): Opening region: {ENCODED => d5ba0cd50015a1edf9d0cc5f3cb70060, NAME => 'testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T11:02:15,942 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7752): Opening region: {ENCODED => 1f4c77cea0dcbd9f3a4c280a94cf65f7, NAME => 'testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T11:02:15,942 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. service=AccessControlService 2024-12-09T11:02:15,942 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. service=AccessControlService 2024-12-09T11:02:15,943 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T11:02:15,943 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T11:02:15,943 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:15,943 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:15,943 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:15,943 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7794): checking encryption for 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:15,943 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:15,943 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7797): checking classloading for 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:15,943 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 
{event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7794): checking encryption for d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:15,943 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7797): checking classloading for d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:15,944 INFO [StoreOpener-1f4c77cea0dcbd9f3a4c280a94cf65f7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:15,945 INFO [StoreOpener-1f4c77cea0dcbd9f3a4c280a94cf65f7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1f4c77cea0dcbd9f3a4c280a94cf65f7 columnFamilyName cf 2024-12-09T11:02:15,946 INFO [StoreOpener-d5ba0cd50015a1edf9d0cc5f3cb70060-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:15,946 DEBUG [StoreOpener-1f4c77cea0dcbd9f3a4c280a94cf65f7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:15,946 INFO [StoreOpener-1f4c77cea0dcbd9f3a4c280a94cf65f7-1 {}] regionserver.HStore(327): Store=1f4c77cea0dcbd9f3a4c280a94cf65f7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:02:15,947 INFO [StoreOpener-d5ba0cd50015a1edf9d0cc5f3cb70060-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d5ba0cd50015a1edf9d0cc5f3cb70060 columnFamilyName cf 2024-12-09T11:02:15,947 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1038): replaying wal for 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:15,947 DEBUG [StoreOpener-d5ba0cd50015a1edf9d0cc5f3cb70060-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:15,947 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:15,947 INFO [StoreOpener-d5ba0cd50015a1edf9d0cc5f3cb70060-1 {}] regionserver.HStore(327): Store=d5ba0cd50015a1edf9d0cc5f3cb70060/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:02:15,948 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:15,948 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1038): replaying wal for d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:15,948 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1048): stopping wal replay for 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:15,948 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1060): Cleaning up temporary data for 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:15,948 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:15,948 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:15,949 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1048): stopping wal replay for d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:15,949 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1060): Cleaning up temporary data for d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:15,949 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1093): writing seq id for 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:15,950 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1093): writing seq id for d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:15,952 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 
2024-12-09T11:02:15,952 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:02:15,952 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1114): Opened d5ba0cd50015a1edf9d0cc5f3cb70060; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75245741, jitterRate=0.12124891579151154}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:02:15,952 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:15,952 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1114): Opened 1f4c77cea0dcbd9f3a4c280a94cf65f7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69766325, jitterRate=0.039599254727363586}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:02:15,952 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:15,953 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1006): Region open journal for d5ba0cd50015a1edf9d0cc5f3cb70060: Running coprocessor pre-open hook at 1733742135943Writing region info on filesystem at 1733742135943Initializing all the Stores at 1733742135944 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742135944Cleaning up temporary data from old regions at 1733742135949 (+5 ms)Running coprocessor post-open hooks at 1733742135952 (+3 ms)Region opened successfully at 1733742135953 (+1 ms) 2024-12-09T11:02:15,953 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1006): Region open journal for 1f4c77cea0dcbd9f3a4c280a94cf65f7: Running coprocessor pre-open hook at 1733742135943Writing region info on filesystem at 1733742135943Initializing all the Stores at 1733742135943Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742135944 (+1 ms)Cleaning up temporary data from old regions at 1733742135948 (+4 ms)Running coprocessor post-open hooks at 1733742135952 (+4 ms)Region opened successfully at 1733742135953 (+1 ms) 
2024-12-09T11:02:15,953 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060., pid=201, masterSystemTime=1733742135939 2024-12-09T11:02:15,953 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7., pid=202, masterSystemTime=1733742135939 2024-12-09T11:02:15,955 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 2024-12-09T11:02:15,955 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 2024-12-09T11:02:15,955 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=200 updating hbase:meta row=d5ba0cd50015a1edf9d0cc5f3cb70060, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:02:15,956 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:15,956 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 
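
At this point both OpenRegionProcedures have reported their regions open (on 3469f9ca0af3,33293 and 3469f9ca0af3,39691) and the post-open deploy tasks have finished. A small sketch of how a client could confirm the assignment from its side, assuming the same table name; the class and printout are illustrative only.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionCheckSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // Expect two locations, matching the ('', '1') and ('1', '') regions opened above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}
```
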
2024-12-09T11:02:15,956 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=199 updating hbase:meta row=1f4c77cea0dcbd9f3a4c280a94cf65f7, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:02:15,957 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=201, ppid=200, state=RUNNABLE, hasLock=false; OpenRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T11:02:15,958 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=202, ppid=199, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:02:15,960 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=201, resume processing ppid=200 2024-12-09T11:02:15,960 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; OpenRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060, server=3469f9ca0af3,33293,1733741767044 in 171 msec 2024-12-09T11:02:15,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=199 2024-12-09T11:02:15,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=199, state=SUCCESS, hasLock=false; OpenRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7, server=3469f9ca0af3,39691,1733741766880 in 171 msec 2024-12-09T11:02:15,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, ppid=198, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d5ba0cd50015a1edf9d0cc5f3cb70060, ASSIGN in 327 msec 2024-12-09T11:02:15,963 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=198 2024-12-09T11:02:15,963 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=198, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=1f4c77cea0dcbd9f3a4c280a94cf65f7, ASSIGN in 328 msec 2024-12-09T11:02:15,964 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:02:15,964 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742135964"}]},"ts":"1733742135964"} 2024-12-09T11:02:15,965 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-09T11:02:15,966 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:02:15,966 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-09T11:02:15,968 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-09T11:02:15,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:15,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:15,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:15,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:15,972 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:15,972 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:15,973 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:15,973 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:15,973 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:15,973 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:15,973 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:15,973 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:15,974 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 365 msec 2024-12-09T11:02:16,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-12-09T11:02:16,233 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-09T11:02:16,233 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T11:02:16,235 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-09T11:02:16,235 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:16,235 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:02:16,237 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T11:02:16,241 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T11:02:16,245 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T11:02:16,247 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T11:02:16,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742136247 (current time:1733742136247). 
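
The PermissionStorage(177) entry above ("Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA") and the ZKPermissionWatcher cache updates show the AccessController persisting full rights for the table's creator and fanning them out over ZooKeeper. A hedged sketch of how an equivalent grant could be issued explicitly through the public client API; the user and table names come from the log, and this is an illustration of the API, not the coprocessor's internal write path.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
  public static void main(String[] args) throws Throwable { // AccessControlClient.grant declares Throwable
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // RWXCA on the whole table (family and qualifier left null), as written to hbase:acl above.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```
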
2024-12-09T11:02:16,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:02:16,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-09T11:02:16,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:02:16,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78c01b83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:16,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:16,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:16,249 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:16,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:16,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:16,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ce3bf0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:16,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:16,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:16,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:16,250 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44830, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:16,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cd8566d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:16,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:16,251 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:16,251 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:16,252 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33694, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:16,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 2024-12-09T11:02:16,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:16,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:16,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:16,253 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T11:02:16,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e937aee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:16,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:16,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:16,254 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:16,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:16,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:16,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fa8c8b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:16,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:16,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:16,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:16,255 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44844, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:16,256 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@afde22a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:16,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:16,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:16,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:16,257 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33706, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T11:02:16,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:02:16,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:16,260 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48590, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:16,261 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 2024-12-09T11:02:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:16,261 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:16,261 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:02:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-09T11:02:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T11:02:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T11:02:16,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-09T11:02:16,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-09T11:02:16,263 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:02:16,264 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:02:16,266 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:02:16,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742279_1455 (size=185) 2024-12-09T11:02:16,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742279_1455 (size=185) 2024-12-09T11:02:16,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742279_1455 (size=185) 2024-12-09T11:02:16,273 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:02:16,273 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060}] 2024-12-09T11:02:16,273 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:16,274 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:16,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-09T11:02:16,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-09T11:02:16,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-09T11:02:16,425 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:16,425 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for 1f4c77cea0dcbd9f3a4c280a94cf65f7: 2024-12-09T11:02:16,425 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-09T11:02:16,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:16,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T11:02:16,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 
2024-12-09T11:02:16,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for d5ba0cd50015a1edf9d0cc5f3cb70060: 2024-12-09T11:02:16,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-09T11:02:16,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:16,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T11:02:16,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742280_1456 (size=76) 2024-12-09T11:02:16,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742280_1456 (size=76) 2024-12-09T11:02:16,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742280_1456 (size=76) 2024-12-09T11:02:16,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742281_1457 (size=76) 2024-12-09T11:02:16,436 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 
2024-12-09T11:02:16,436 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-09T11:02:16,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742281_1457 (size=76) 2024-12-09T11:02:16,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742281_1457 (size=76) 2024-12-09T11:02:16,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-12-09T11:02:16,437 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:16,437 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:16,437 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 2024-12-09T11:02:16,437 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-09T11:02:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-12-09T11:02:16,438 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:16,438 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:16,441 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7 in 165 msec 2024-12-09T11:02:16,443 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-12-09T11:02:16,443 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060 in 166 msec 2024-12-09T11:02:16,443 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:02:16,444 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:02:16,444 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-09T11:02:16,444 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:02:16,445 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:16,445 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-09T11:02:16,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742282_1458 (size=68) 2024-12-09T11:02:16,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742282_1458 (size=68) 2024-12-09T11:02:16,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742282_1458 (size=68) 2024-12-09T11:02:16,454 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:02:16,454 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,454 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742283_1459 (size=673) 2024-12-09T11:02:16,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742283_1459 (size=673) 2024-12-09T11:02:16,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742283_1459 (size=673) 2024-12-09T11:02:16,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-09T11:02:16,469 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-09T11:02:16,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-09T11:02:16,473 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:02:16,481 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:02:16,482 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,483 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:02:16,483 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-09T11:02:16,484 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 221 msec 2024-12-09T11:02:16,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-09T11:02:16,582 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-09T11:02:16,587 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39691 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:02:16,588 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:02:16,589 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T11:02:16,591 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-09T11:02:16,591 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 
2024-12-09T11:02:16,591 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:02:16,593 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T11:02:16,597 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T11:02:16,601 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T11:02:16,602 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T11:02:16,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742136602 (current time:1733742136602). 2024-12-09T11:02:16,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:02:16,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-09T11:02:16,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:02:16,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24f9830c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:16,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:16,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:16,604 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:16,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:16,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:16,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39aa0867, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:16,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:16,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:16,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:16,605 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44864, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:16,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@670f5ea0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:16,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:16,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:16,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:16,607 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33722, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:16,608 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 
2024-12-09T11:02:16,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:16,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:16,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:16,608 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:02:16,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46641603, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:16,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:16,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:16,609 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:16,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:16,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:16,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48fc82ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:16,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:16,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:16,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:16,610 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44878, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:16,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@794b6edb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:16,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:16,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:16,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:16,612 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33724, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:16,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:02:16,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:16,614 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48598, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:16,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 
2024-12-09T11:02:16,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:16,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:16,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:16,615 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:02:16,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-09T11:02:16,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-09T11:02:16,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T11:02:16,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-09T11:02:16,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-09T11:02:16,618 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:02:16,618 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:02:16,620 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:02:16,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742284_1460 (size=180) 2024-12-09T11:02:16,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742284_1460 (size=180) 2024-12-09T11:02:16,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742284_1460 (size=180) 2024-12-09T11:02:16,626 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:02:16,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7}, {pid=208, ppid=206, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060}] 2024-12-09T11:02:16,627 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=206, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:16,627 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=207, ppid=206, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:16,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-09T11:02:16,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=207 2024-12-09T11:02:16,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=208 2024-12-09T11:02:16,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:16,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 2024-12-09T11:02:16,779 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2902): Flushing d5ba0cd50015a1edf9d0cc5f3cb70060 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-09T11:02:16,779 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2902): Flushing 1f4c77cea0dcbd9f3a4c280a94cf65f7 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-09T11:02:16,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120949315bdb607d4517959e777ce6c6f31a_1f4c77cea0dcbd9f3a4c280a94cf65f7 is 71, key is 02f769ecf799be955f1c6574435ef9f1/cf:q/1733742136586/Put/seqid=0 2024-12-09T11:02:16,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241209b874519ecdb24f96b81ac07b6a189fe6_d5ba0cd50015a1edf9d0cc5f3cb70060 is 71, key is 10652d78be2cc9a4f67bdde089a6d693/cf:q/1733742136588/Put/seqid=0 2024-12-09T11:02:16,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742285_1461 (size=5311) 2024-12-09T11:02:16,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742286_1462 (size=7961) 2024-12-09T11:02:16,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742285_1461 (size=5311) 2024-12-09T11:02:16,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742285_1461 (size=5311) 2024-12-09T11:02:16,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742286_1462 (size=7961) 
2024-12-09T11:02:16,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742286_1462 (size=7961) 2024-12-09T11:02:16,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:16,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:16,809 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120949315bdb607d4517959e777ce6c6f31a_1f4c77cea0dcbd9f3a4c280a94cf65f7 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024120949315bdb607d4517959e777ce6c6f31a_1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:16,810 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241209b874519ecdb24f96b81ac07b6a189fe6_d5ba0cd50015a1edf9d0cc5f3cb70060 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241209b874519ecdb24f96b81ac07b6a189fe6_d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:16,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/.tmp/cf/1de029ace4d54cf1a0817af15494087a, store: [table=testtb-testEmptyExportFileSystemState family=cf region=1f4c77cea0dcbd9f3a4c280a94cf65f7] 2024-12-09T11:02:16,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/.tmp/cf/1de029ace4d54cf1a0817af15494087a is 214, key is 03be62bd2da27d4349e12a26a7d839cd3/cf:q/1733742136586/Put/seqid=0 2024-12-09T11:02:16,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/.tmp/cf/9c1b88b18eec4f45ab3b6c0f51036436, store: [table=testtb-testEmptyExportFileSystemState family=cf region=d5ba0cd50015a1edf9d0cc5f3cb70060] 
2024-12-09T11:02:16,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/.tmp/cf/9c1b88b18eec4f45ab3b6c0f51036436 is 214, key is 1e1d052f1df119d1fe3e0f0004a407832/cf:q/1733742136588/Put/seqid=0 2024-12-09T11:02:16,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742287_1463 (size=6566) 2024-12-09T11:02:16,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742287_1463 (size=6566) 2024-12-09T11:02:16,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742287_1463 (size=6566) 2024-12-09T11:02:16,815 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/.tmp/cf/1de029ace4d54cf1a0817af15494087a 2024-12-09T11:02:16,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/.tmp/cf/1de029ace4d54cf1a0817af15494087a as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/cf/1de029ace4d54cf1a0817af15494087a 2024-12-09T11:02:16,823 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/cf/1de029ace4d54cf1a0817af15494087a, entries=6, sequenceid=6, filesize=6.4 K 2024-12-09T11:02:16,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742288_1464 (size=14607) 2024-12-09T11:02:16,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742288_1464 (size=14607) 2024-12-09T11:02:16,824 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 1f4c77cea0dcbd9f3a4c280a94cf65f7 in 45ms, sequenceid=6, compaction requested=false 2024-12-09T11:02:16,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-09T11:02:16,825 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] mob.DefaultMobStoreFlusher(147): Mob 
store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/.tmp/cf/9c1b88b18eec4f45ab3b6c0f51036436 2024-12-09T11:02:16,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2603): Flush status journal for 1f4c77cea0dcbd9f3a4c280a94cf65f7: 2024-12-09T11:02:16,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-09T11:02:16,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:16,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/cf/1de029ace4d54cf1a0817af15494087a] hfiles 2024-12-09T11:02:16,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/cf/1de029ace4d54cf1a0817af15494087a for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/.tmp/cf/9c1b88b18eec4f45ab3b6c0f51036436 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/cf/9c1b88b18eec4f45ab3b6c0f51036436 2024-12-09T11:02:16,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742288_1464 (size=14607) 2024-12-09T11:02:16,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742289_1465 (size=115) 2024-12-09T11:02:16,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742289_1465 (size=115) 2024-12-09T11:02:16,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742289_1465 (size=115) 2024-12-09T11:02:16,831 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:16,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=207 2024-12-09T11:02:16,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=207 2024-12-09T11:02:16,832 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:16,832 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=207, ppid=206, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:16,833 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/cf/9c1b88b18eec4f45ab3b6c0f51036436, entries=44, sequenceid=6, filesize=14.3 K 2024-12-09T11:02:16,834 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for d5ba0cd50015a1edf9d0cc5f3cb70060 in 55ms, sequenceid=6, compaction requested=false 2024-12-09T11:02:16,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2603): Flush status journal for d5ba0cd50015a1edf9d0cc5f3cb70060: 2024-12-09T11:02:16,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-09T11:02:16,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:16,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/cf/9c1b88b18eec4f45ab3b6c0f51036436] hfiles 2024-12-09T11:02:16,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/cf/9c1b88b18eec4f45ab3b6c0f51036436 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,834 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7 in 207 msec 2024-12-09T11:02:16,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742290_1466 (size=115) 2024-12-09T11:02:16,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742290_1466 (size=115) 2024-12-09T11:02:16,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742290_1466 (size=115) 2024-12-09T11:02:16,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 
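The flush and SnapshotRegionProcedure entries above are the server-side work for a FLUSH-type table snapshot (the procedure state below shows type=FLUSH). As a point of reference, here is a minimal client-side sketch of the call that triggers this flow through the public Admin API; the connection setup (an hbase-site.xml on the classpath pointing at this cluster) is an assumption, while the snapshot and table names are taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath describing the test cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // A FLUSH-type snapshot asks each region to flush its memstore before its
      // store files are referenced, which matches the flush + SnapshotRegionProcedure
      // sequence in the log above.
      admin.snapshot("snaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}
```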
2024-12-09T11:02:16,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=208
2024-12-09T11:02:16,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=208
2024-12-09T11:02:16,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region d5ba0cd50015a1edf9d0cc5f3cb70060
2024-12-09T11:02:16,847 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=206, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060
2024-12-09T11:02:16,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=206
2024-12-09T11:02:16,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=206, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060 in 222 msec
2024-12-09T11:02:16,849 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-09T11:02:16,850 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-09T11:02:16,851 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot.
2024-12-09T11:02:16,851 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:02:16,851 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:16,852 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241209b874519ecdb24f96b81ac07b6a189fe6_d5ba0cd50015a1edf9d0cc5f3cb70060, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024120949315bdb607d4517959e777ce6c6f31a_1f4c77cea0dcbd9f3a4c280a94cf65f7] hfiles 2024-12-09T11:02:16,852 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241209b874519ecdb24f96b81ac07b6a189fe6_d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:16,852 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024120949315bdb607d4517959e777ce6c6f31a_1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:16,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742291_1467 (size=299) 2024-12-09T11:02:16,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742291_1467 (size=299) 2024-12-09T11:02:16,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742291_1467 (size=299) 2024-12-09T11:02:16,865 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:02:16,865 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,866 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742292_1468 (size=983) 2024-12-09T11:02:16,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742292_1468 (size=983) 
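The mobdir references above (hfiles suffixed with the two region encodings, stored under a synthetic MOB region) appear because the 'cf' family of this test table is MOB-enabled: DefaultMobStoreFlusher writes large cells into separate MOB files, and the snapshot manifest has to reference those files as well. Below is a hedged sketch of how such a family can be declared with the client API; the MOB threshold value is an illustrative assumption, not something read from the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // MOB-enabled family: cells larger than the threshold are written to MOB
      // files under the cluster's mobdir instead of the regular region store files.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L) // assumed threshold; 0 pushes effectively every value into MOB files
          .build();
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
          .setColumnFamily(cf)
          .build());
    }
  }
}
```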
2024-12-09T11:02:16,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742292_1468 (size=983) 2024-12-09T11:02:16,882 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:02:16,891 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:02:16,891 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,893 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:02:16,893 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-09T11:02:16,894 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 277 msec 2024-12-09T11:02:16,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-09T11:02:16,932 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-09T11:02:16,932 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742136932 2024-12-09T11:02:16,932 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35869, tgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742136932, rawTgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742136932, srcFsUri=hdfs://localhost:35869, srcDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:16,961 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35869, 
inputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:16,961 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742136932, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742136932/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,962 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T11:02:16,966 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742136932/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:16,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742293_1469 (size=185) 2024-12-09T11:02:16,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742293_1469 (size=185) 2024-12-09T11:02:16,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742294_1470 (size=673) 2024-12-09T11:02:16,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742293_1469 (size=185) 2024-12-09T11:02:16,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742294_1470 (size=673) 2024-12-09T11:02:16,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742294_1470 (size=673) 2024-12-09T11:02:16,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:16,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:16,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:17,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-10444521071875848669.jar 2024-12-09T11:02:17,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:17,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:18,052 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-9332751807976777593.jar 2024-12-09T11:02:18,053 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:18,053 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:18,053 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:18,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:18,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:18,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:18,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T11:02:18,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T11:02:18,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T11:02:18,055 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T11:02:18,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T11:02:18,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T11:02:18,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T11:02:18,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T11:02:18,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T11:02:18,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T11:02:18,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T11:02:18,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:02:18,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:02:18,057 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:02:18,057 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:02:18,057 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:02:18,057 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:02:18,057 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:02:18,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742295_1471 (size=24020) 2024-12-09T11:02:18,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742295_1471 (size=24020) 2024-12-09T11:02:18,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742295_1471 (size=24020) 2024-12-09T11:02:18,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742296_1472 (size=77755) 2024-12-09T11:02:18,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742296_1472 (size=77755) 2024-12-09T11:02:18,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742296_1472 (size=77755) 2024-12-09T11:02:18,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742297_1473 (size=131360) 2024-12-09T11:02:18,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742297_1473 (size=131360) 2024-12-09T11:02:18,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742297_1473 (size=131360) 2024-12-09T11:02:18,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742298_1474 (size=111793) 2024-12-09T11:02:18,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742298_1474 (size=111793) 2024-12-09T11:02:18,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742298_1474 (size=111793) 2024-12-09T11:02:18,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742299_1475 (size=6425021) 2024-12-09T11:02:18,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to 
blk_1073742299_1475 (size=6425021) 2024-12-09T11:02:18,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742299_1475 (size=6425021) 2024-12-09T11:02:18,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742300_1476 (size=1832290) 2024-12-09T11:02:18,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742300_1476 (size=1832290) 2024-12-09T11:02:18,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742300_1476 (size=1832290) 2024-12-09T11:02:18,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742301_1477 (size=8360282) 2024-12-09T11:02:18,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742301_1477 (size=8360282) 2024-12-09T11:02:18,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742301_1477 (size=8360282) 2024-12-09T11:02:18,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742302_1478 (size=503880) 2024-12-09T11:02:18,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742302_1478 (size=503880) 2024-12-09T11:02:18,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742302_1478 (size=503880) 2024-12-09T11:02:18,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742303_1479 (size=322274) 2024-12-09T11:02:18,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742303_1479 (size=322274) 2024-12-09T11:02:18,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742303_1479 (size=322274) 2024-12-09T11:02:18,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742304_1480 (size=20406) 2024-12-09T11:02:18,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742304_1480 (size=20406) 2024-12-09T11:02:18,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742304_1480 (size=20406) 2024-12-09T11:02:18,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742305_1481 (size=45609) 2024-12-09T11:02:18,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742305_1481 (size=45609) 2024-12-09T11:02:18,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742305_1481 (size=45609) 2024-12-09T11:02:18,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is 
added to blk_1073742306_1482 (size=136454) 2024-12-09T11:02:18,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742306_1482 (size=136454) 2024-12-09T11:02:18,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742306_1482 (size=136454) 2024-12-09T11:02:18,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742307_1483 (size=1597136) 2024-12-09T11:02:18,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742307_1483 (size=1597136) 2024-12-09T11:02:18,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742307_1483 (size=1597136) 2024-12-09T11:02:18,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742308_1484 (size=30873) 2024-12-09T11:02:18,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742308_1484 (size=30873) 2024-12-09T11:02:18,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742308_1484 (size=30873) 2024-12-09T11:02:18,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742309_1485 (size=443171) 2024-12-09T11:02:18,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742309_1485 (size=443171) 2024-12-09T11:02:18,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742309_1485 (size=443171) 2024-12-09T11:02:18,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742310_1486 (size=29229) 2024-12-09T11:02:18,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742310_1486 (size=29229) 2024-12-09T11:02:18,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742310_1486 (size=29229) 2024-12-09T11:02:18,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742311_1487 (size=903861) 2024-12-09T11:02:18,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742311_1487 (size=903861) 2024-12-09T11:02:18,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742311_1487 (size=903861) 2024-12-09T11:02:18,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742312_1488 (size=5175431) 2024-12-09T11:02:18,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742312_1488 (size=5175431) 2024-12-09T11:02:18,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34611 is added to blk_1073742312_1488 (size=5175431) 2024-12-09T11:02:18,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742313_1489 (size=232881) 2024-12-09T11:02:18,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742313_1489 (size=232881) 2024-12-09T11:02:18,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742313_1489 (size=232881) 2024-12-09T11:02:18,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742314_1490 (size=1323991) 2024-12-09T11:02:18,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742314_1490 (size=1323991) 2024-12-09T11:02:18,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742314_1490 (size=1323991) 2024-12-09T11:02:18,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742315_1491 (size=4695811) 2024-12-09T11:02:18,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742315_1491 (size=4695811) 2024-12-09T11:02:18,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742315_1491 (size=4695811) 2024-12-09T11:02:18,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742316_1492 (size=1877034) 2024-12-09T11:02:18,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742316_1492 (size=1877034) 2024-12-09T11:02:18,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742316_1492 (size=1877034) 2024-12-09T11:02:18,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742317_1493 (size=217555) 2024-12-09T11:02:18,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742317_1493 (size=217555) 2024-12-09T11:02:18,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742317_1493 (size=217555) 2024-12-09T11:02:18,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742318_1494 (size=4188619) 2024-12-09T11:02:18,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742318_1494 (size=4188619) 2024-12-09T11:02:18,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742318_1494 (size=4188619) 2024-12-09T11:02:18,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742319_1495 (size=127628) 2024-12-09T11:02:18,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742319_1495 (size=127628)
2024-12-09T11:02:18,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742319_1495 (size=127628)
2024-12-09T11:02:18,392 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String).
2024-12-09T11:02:18,395 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list
2024-12-09T11:02:18,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742320_1496 (size=7)
2024-12-09T11:02:18,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742320_1496 (size=7)
2024-12-09T11:02:18,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742320_1496 (size=7)
2024-12-09T11:02:18,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742321_1497 (size=10)
2024-12-09T11:02:18,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742321_1497 (size=10)
2024-12-09T11:02:18,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742321_1497 (size=10)
2024-12-09T11:02:18,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742322_1498 (size=303982)
2024-12-09T11:02:18,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742322_1498 (size=303982)
2024-12-09T11:02:18,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742322_1498 (size=303982)
2024-12-09T11:02:18,434 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-09T11:02:18,434 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low.
skipping enforcement to allow at least one application to start 2024-12-09T11:02:18,648 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0008_000001 (auth:SIMPLE) from 127.0.0.1:55372 2024-12-09T11:02:20,778 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:02:24,062 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0008_000001 (auth:SIMPLE) from 127.0.0.1:40734 2024-12-09T11:02:24,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742323_1499 (size=349656) 2024-12-09T11:02:24,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742323_1499 (size=349656) 2024-12-09T11:02:24,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742323_1499 (size=349656) 2024-12-09T11:02:25,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742324_1500 (size=8568) 2024-12-09T11:02:25,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742324_1500 (size=8568) 2024-12-09T11:02:25,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742324_1500 (size=8568) 2024-12-09T11:02:25,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742325_1501 (size=460) 2024-12-09T11:02:25,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742325_1501 (size=460) 2024-12-09T11:02:25,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742325_1501 (size=460) 2024-12-09T11:02:25,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742326_1502 (size=8568) 2024-12-09T11:02:25,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742326_1502 (size=8568) 2024-12-09T11:02:25,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742326_1502 (size=8568) 2024-12-09T11:02:25,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742327_1503 (size=349656) 2024-12-09T11:02:25,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742327_1503 (size=349656) 2024-12-09T11:02:25,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742327_1503 (size=349656) 2024-12-09T11:02:26,545 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T11:02:26,546 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
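The entries above show ExportSnapshot copying the snapshot manifest, staging the dependency jars resolved by TableMapReduceUtil, running the copy as a YARN/MapReduce job, and then finalizing and re-verifying the exported snapshot. A rough sketch of driving the same export programmatically follows; it assumes ExportSnapshot can be run through Hadoop's ToolRunner (as the documented command-line form suggests) and reuses only the snapshot name and destination path that appear in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to the documented CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //       -snapshot emptySnaptb0-testEmptyExportFileSystemState \
    //       -copy-to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742136932
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to",
        "hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742136932"
    });
    System.exit(rc);
  }
}
```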
2024-12-09T11:02:26,550 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:26,550 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T11:02:26,550 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T11:02:26,550 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:26,551 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-09T11:02:26,551 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-09T11:02:26,551 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742136932/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742136932/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:26,551 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742136932/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-09T11:02:26,551 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742136932/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-09T11:02:26,556 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=209, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=209 2024-12-09T11:02:26,559 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742146559"}]},"ts":"1733742146559"} 2024-12-09T11:02:26,560 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-09T11:02:26,560 INFO [PEWorker-4 {}] 
procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-09T11:02:26,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-09T11:02:26,562 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=1f4c77cea0dcbd9f3a4c280a94cf65f7, UNASSIGN}, {pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d5ba0cd50015a1edf9d0cc5f3cb70060, UNASSIGN}] 2024-12-09T11:02:26,563 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d5ba0cd50015a1edf9d0cc5f3cb70060, UNASSIGN 2024-12-09T11:02:26,563 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=1f4c77cea0dcbd9f3a4c280a94cf65f7, UNASSIGN 2024-12-09T11:02:26,564 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=212 updating hbase:meta row=d5ba0cd50015a1edf9d0cc5f3cb70060, regionState=CLOSING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:02:26,564 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=211 updating hbase:meta row=1f4c77cea0dcbd9f3a4c280a94cf65f7, regionState=CLOSING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:02:26,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=1f4c77cea0dcbd9f3a4c280a94cf65f7, UNASSIGN because future has completed 2024-12-09T11:02:26,565 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:02:26,565 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=213, ppid=211, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:02:26,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d5ba0cd50015a1edf9d0cc5f3cb70060, UNASSIGN because future has completed 2024-12-09T11:02:26,566 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:02:26,566 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=212, state=RUNNABLE, hasLock=false; CloseRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T11:02:26,662 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=209 2024-12-09T11:02:26,718 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(122): Close 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:26,718 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:02:26,718 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1722): Closing 1f4c77cea0dcbd9f3a4c280a94cf65f7, disabling compactions & flushes 2024-12-09T11:02:26,718 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:26,718 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:26,718 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. after waiting 0 ms 2024-12-09T11:02:26,718 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:26,718 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(122): Close d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:26,719 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:02:26,719 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1722): Closing d5ba0cd50015a1edf9d0cc5f3cb70060, disabling compactions & flushes 2024-12-09T11:02:26,719 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 2024-12-09T11:02:26,719 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 2024-12-09T11:02:26,719 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. after waiting 0 ms 2024-12-09T11:02:26,719 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 
2024-12-09T11:02:26,722 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:02:26,722 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:02:26,722 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:02:26,722 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7. 2024-12-09T11:02:26,722 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1676): Region close journal for 1f4c77cea0dcbd9f3a4c280a94cf65f7: Waiting for close lock at 1733742146718Running coprocessor pre-close hooks at 1733742146718Disabling compacts and flushes for region at 1733742146718Disabling writes for close at 1733742146718Writing region close event to WAL at 1733742146719 (+1 ms)Running coprocessor post-close hooks at 1733742146722 (+3 ms)Closed at 1733742146722 2024-12-09T11:02:26,723 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:02:26,723 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060. 
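The region closes above are the region-server side of the DisableTableProcedure that follows the export; once both regions are closed the table is marked DISABLED, and the subsequent DeleteTableProcedure archives its region directories. The client-side teardown that drives this is roughly the following sketch (connection setup assumed; only the table name comes from the log).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // A table must be disabled (all regions closed) before it can be deleted;
      // the master runs DisableTableProcedure, then DeleteTableProcedure archives
      // the region directories, as the log entries around this point show.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      admin.deleteTable(table);
    }
  }
}
```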
2024-12-09T11:02:26,723 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1676): Region close journal for d5ba0cd50015a1edf9d0cc5f3cb70060: Waiting for close lock at 1733742146719Running coprocessor pre-close hooks at 1733742146719Disabling compacts and flushes for region at 1733742146719Disabling writes for close at 1733742146719Writing region close event to WAL at 1733742146720 (+1 ms)Running coprocessor post-close hooks at 1733742146722 (+2 ms)Closed at 1733742146723 (+1 ms) 2024-12-09T11:02:26,724 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(157): Closed 1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:26,724 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=211 updating hbase:meta row=1f4c77cea0dcbd9f3a4c280a94cf65f7, regionState=CLOSED 2024-12-09T11:02:26,724 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(157): Closed d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:26,725 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=212 updating hbase:meta row=d5ba0cd50015a1edf9d0cc5f3cb70060, regionState=CLOSED 2024-12-09T11:02:26,726 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=213, ppid=211, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:02:26,727 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=212, state=RUNNABLE, hasLock=false; CloseRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T11:02:26,731 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=213, resume processing ppid=211 2024-12-09T11:02:26,731 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, ppid=211, state=SUCCESS, hasLock=false; CloseRegionProcedure 1f4c77cea0dcbd9f3a4c280a94cf65f7, server=3469f9ca0af3,39691,1733741766880 in 161 msec 2024-12-09T11:02:26,732 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=214, resume processing ppid=212 2024-12-09T11:02:26,732 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=212, state=SUCCESS, hasLock=false; CloseRegionProcedure d5ba0cd50015a1edf9d0cc5f3cb70060, server=3469f9ca0af3,33293,1733741767044 in 164 msec 2024-12-09T11:02:26,732 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=210, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=1f4c77cea0dcbd9f3a4c280a94cf65f7, UNASSIGN in 169 msec 2024-12-09T11:02:26,733 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=212, resume processing ppid=210 2024-12-09T11:02:26,733 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, ppid=210, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d5ba0cd50015a1edf9d0cc5f3cb70060, UNASSIGN in 170 msec 2024-12-09T11:02:26,735 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=209 2024-12-09T11:02:26,735 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=209, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 173 msec 2024-12-09T11:02:26,736 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742146736"}]},"ts":"1733742146736"} 2024-12-09T11:02:26,738 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-09T11:02:26,738 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-09T11:02:26,740 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 182 msec 2024-12-09T11:02:26,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=209 2024-12-09T11:02:26,872 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-09T11:02:26,872 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,874 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,875 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=215, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,876 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,878 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:26,878 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:26,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,880 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,880 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/recovered.edits] 2024-12-09T11:02:26,880 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/recovered.edits] 2024-12-09T11:02:26,880 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-09T11:02:26,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:26,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:26,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:26,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:26,882 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-12-09T11:02:26,882 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache 
refresh because writable data is empty 2024-12-09T11:02:26,882 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-12-09T11:02:26,882 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-12-09T11:02:26,882 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T11:02:26,882 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T11:02:26,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=215 2024-12-09T11:02:26,883 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:26,884 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:26,884 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:26,884 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:26,885 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/cf/9c1b88b18eec4f45ab3b6c0f51036436 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/cf/9c1b88b18eec4f45ab3b6c0f51036436 2024-12-09T11:02:26,885 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/cf/1de029ace4d54cf1a0817af15494087a to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/cf/1de029ace4d54cf1a0817af15494087a 2024-12-09T11:02:26,887 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/recovered.edits/9.seqid to 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7/recovered.edits/9.seqid 2024-12-09T11:02:26,887 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060/recovered.edits/9.seqid 2024-12-09T11:02:26,888 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:26,888 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testEmptyExportFileSystemState/d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:26,888 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-09T11:02:26,888 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-09T11:02:26,889 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-12-09T11:02:26,892 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241209b874519ecdb24f96b81ac07b6a189fe6_d5ba0cd50015a1edf9d0cc5f3cb70060 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241209b874519ecdb24f96b81ac07b6a189fe6_d5ba0cd50015a1edf9d0cc5f3cb70060 2024-12-09T11:02:26,893 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024120949315bdb607d4517959e777ce6c6f31a_1f4c77cea0dcbd9f3a4c280a94cf65f7 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e2024120949315bdb607d4517959e777ce6c6f31a_1f4c77cea0dcbd9f3a4c280a94cf65f7 2024-12-09T11:02:26,893 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-09T11:02:26,895 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(120): Deleting regions from META for pid=215, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,898 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-09T11:02:26,900 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-09T11:02:26,901 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=215, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,902 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-09T11:02:26,902 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742146902"}]},"ts":"9223372036854775807"} 2024-12-09T11:02:26,902 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742146902"}]},"ts":"9223372036854775807"} 2024-12-09T11:02:26,904 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T11:02:26,904 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 1f4c77cea0dcbd9f3a4c280a94cf65f7, NAME => 'testtb-testEmptyExportFileSystemState,,1733742135607.1f4c77cea0dcbd9f3a4c280a94cf65f7.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => d5ba0cd50015a1edf9d0cc5f3cb70060, NAME => 'testtb-testEmptyExportFileSystemState,1,1733742135607.d5ba0cd50015a1edf9d0cc5f3cb70060.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T11:02:26,904 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
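[Editor's note, not part of the log] The surrounding records cover the DeleteTableProcedure (pid=215) archiving region and MOB directories, removing the table's rows from hbase:meta, and the subsequent deletion of the two snapshots. A hedged sketch of the corresponding client calls follows; it is not the test's source, the helper name is hypothetical, and an open Admin handle is assumed.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class CleanupSketch {
  static void cleanup(Admin admin) throws Exception {
    TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    // Drives a DeleteTableProcedure: region dirs are moved under /archive, region rows
    // and the table state are deleted from hbase:meta, and the descriptor is removed.
    admin.deleteTable(tn);
    // Corresponds to the "delete name: ... type: DISABLED" master RPCs logged below.
    admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
    admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
  }
}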
2024-12-09T11:02:26,905 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733742146904"}]},"ts":"9223372036854775807"} 2024-12-09T11:02:26,906 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-09T11:02:26,907 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=215, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 35 msec 2024-12-09T11:02:26,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=215 2024-12-09T11:02:26,992 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-09T11:02:26,992 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-09T11:02:26,998 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-09T11:02:27,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:27,001 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-09T11:02:27,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-09T11:02:27,024 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=825 (was 814) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1359318497) connection to localhost/127.0.0.1:33425 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1359318497) connection to localhost/127.0.0.1:37317 from appattempt_1733741775522_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_983603010_1 at /127.0.0.1:33400 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37317 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:46352 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:38004 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_983603010_1 at /127.0.0.1:37990 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1359318497) connection to localhost/127.0.0.1:33323 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1751713061_22 at /127.0.0.1:33430 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 23942) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7512 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33323 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=833 (was 813) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=840 (was 919), ProcessCount=17 (was 14) - ProcessCount LEAK? -, AvailableMemoryMB=2338 (was 2815) 2024-12-09T11:02:27,024 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=825 is superior to 500 2024-12-09T11:02:27,041 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=825, OpenFileDescriptor=833, MaxFileDescriptor=1048576, SystemLoadAverage=840, ProcessCount=17, AvailableMemoryMB=2337 2024-12-09T11:02:27,041 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=825 is superior to 500 2024-12-09T11:02:27,042 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:02:27,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=216, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-09T11:02:27,044 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=216, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:02:27,044 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 216 2024-12-09T11:02:27,045 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=216, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:02:27,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=216 2024-12-09T11:02:27,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742328_1504 (size=440) 2024-12-09T11:02:27,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742328_1504 (size=440) 2024-12-09T11:02:27,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742328_1504 (size=440) 2024-12-09T11:02:27,052 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 770bc39038dbb128996bd53b724a4854, NAME => 'testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854.', STARTKEY => '', ENDKEY => '1'}, 
tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:27,053 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 618772b2136080eaf54ed02f04247e75, NAME => 'testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:27,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742329_1505 (size=65) 2024-12-09T11:02:27,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742330_1506 (size=65) 2024-12-09T11:02:27,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742329_1505 (size=65) 2024-12-09T11:02:27,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742329_1505 (size=65) 2024-12-09T11:02:27,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742330_1506 (size=65) 2024-12-09T11:02:27,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742330_1506 (size=65) 2024-12-09T11:02:27,060 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:27,060 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:27,060 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 618772b2136080eaf54ed02f04247e75, disabling compactions & flushes 2024-12-09T11:02:27,060 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region 
testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 2024-12-09T11:02:27,060 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 770bc39038dbb128996bd53b724a4854, disabling compactions & flushes 2024-12-09T11:02:27,060 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 2024-12-09T11:02:27,060 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:02:27,060 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. after waiting 0 ms 2024-12-09T11:02:27,060 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:02:27,060 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 2024-12-09T11:02:27,060 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. after waiting 0 ms 2024-12-09T11:02:27,060 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 2024-12-09T11:02:27,060 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:02:27,060 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 
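[Editor's note, not part of the log] The create record above spells out the descriptor for testtb-testExportWithChecksum: REGION_REPLICATION=1, a single MOB-enabled family 'cf' with MOB_THRESHOLD=0 and VERSIONS=1, split at row key "1" so two regions are initialized. A minimal sketch of building that descriptor with the public client API is below; it is an assumed reconstruction, not the test's helper, and the Admin handle is assumed.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  static void create(Admin admin) throws Exception {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
            .setRegionReplication(1)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell is written as a MOB file
                .setMaxVersions(1)     // VERSIONS => '1'
                .build());
    byte[][] splitKeys = { Bytes.toBytes("1") };  // yields regions ['', '1') and ['1', '')
    admin.createTable(table.build(), splitKeys);
  }
}

With MOB_THRESHOLD set to 0, the flushes later in the test write their cell data into the mobdir hierarchy seen earlier in the log rather than into ordinary store files.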
2024-12-09T11:02:27,060 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 618772b2136080eaf54ed02f04247e75: Waiting for close lock at 1733742147060Disabling compacts and flushes for region at 1733742147060Disabling writes for close at 1733742147060Writing region close event to WAL at 1733742147060Closed at 1733742147060 2024-12-09T11:02:27,060 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 770bc39038dbb128996bd53b724a4854: Waiting for close lock at 1733742147060Disabling compacts and flushes for region at 1733742147060Disabling writes for close at 1733742147060Writing region close event to WAL at 1733742147060Closed at 1733742147060 2024-12-09T11:02:27,061 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=216, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:02:27,061 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733742147061"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742147061"}]},"ts":"1733742147061"} 2024-12-09T11:02:27,062 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733742147061"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742147061"}]},"ts":"1733742147061"} 2024-12-09T11:02:27,064 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-09T11:02:27,064 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=216, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:02:27,065 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742147064"}]},"ts":"1733742147064"} 2024-12-09T11:02:27,066 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-09T11:02:27,066 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T11:02:27,067 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:02:27,067 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T11:02:27,067 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T11:02:27,067 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:02:27,067 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:02:27,067 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T11:02:27,067 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T11:02:27,067 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:02:27,067 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T11:02:27,067 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T11:02:27,068 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=770bc39038dbb128996bd53b724a4854, ASSIGN}, {pid=218, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=618772b2136080eaf54ed02f04247e75, ASSIGN}] 2024-12-09T11:02:27,068 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=218, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=618772b2136080eaf54ed02f04247e75, ASSIGN 2024-12-09T11:02:27,068 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=217, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=770bc39038dbb128996bd53b724a4854, ASSIGN 2024-12-09T11:02:27,069 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=218, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=618772b2136080eaf54ed02f04247e75, ASSIGN; state=OFFLINE, location=3469f9ca0af3,39691,1733741766880; forceNewPlan=false, retain=false 2024-12-09T11:02:27,069 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=217, ppid=216, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=770bc39038dbb128996bd53b724a4854, ASSIGN; state=OFFLINE, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T11:02:27,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=216 2024-12-09T11:02:27,220 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-09T11:02:27,220 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=218 updating hbase:meta row=618772b2136080eaf54ed02f04247e75, regionState=OPENING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:02:27,220 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=217 updating hbase:meta row=770bc39038dbb128996bd53b724a4854, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:02:27,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=218, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=618772b2136080eaf54ed02f04247e75, ASSIGN because future has completed 2024-12-09T11:02:27,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; OpenRegionProcedure 618772b2136080eaf54ed02f04247e75, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:02:27,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=770bc39038dbb128996bd53b724a4854, ASSIGN because future has completed 2024-12-09T11:02:27,223 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=220, ppid=217, state=RUNNABLE, hasLock=false; OpenRegionProcedure 770bc39038dbb128996bd53b724a4854, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T11:02:27,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=216 2024-12-09T11:02:27,376 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 2024-12-09T11:02:27,376 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(7752): Opening region: {ENCODED => 618772b2136080eaf54ed02f04247e75, NAME => 'testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T11:02:27,377 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. service=AccessControlService 2024-12-09T11:02:27,377 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T11:02:27,377 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,377 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:27,377 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(7794): checking encryption for 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,377 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(7797): checking classloading for 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,378 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:02:27,378 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(7752): Opening region: {ENCODED => 770bc39038dbb128996bd53b724a4854, NAME => 'testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T11:02:27,378 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. service=AccessControlService 2024-12-09T11:02:27,378 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
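[Editor's note, not part of the log] The records above show the two OpenRegionProcedures landing the new regions on servers 39691 and 33293 and registering the AccessControlService coprocessor. As a hedged illustration only (hypothetical class name, Connection assumed), a client can confirm where each region ended up through a RegionLocator once the ASSIGN chain finishes:

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class LocateRegionsSketch {
  static void show(Connection conn) throws Exception {
    try (RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportWithChecksum"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // e.g. 770bc39038dbb128996bd53b724a4854 -> 3469f9ca0af3,33293,... per the log above
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}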
2024-12-09T11:02:27,378 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,378 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:02:27,379 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(7794): checking encryption for 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,379 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(7797): checking classloading for 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,379 INFO [StoreOpener-618772b2136080eaf54ed02f04247e75-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,380 INFO [StoreOpener-770bc39038dbb128996bd53b724a4854-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,380 INFO [StoreOpener-618772b2136080eaf54ed02f04247e75-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 618772b2136080eaf54ed02f04247e75 columnFamilyName cf 2024-12-09T11:02:27,381 DEBUG [StoreOpener-618772b2136080eaf54ed02f04247e75-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:27,381 INFO [StoreOpener-770bc39038dbb128996bd53b724a4854-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
770bc39038dbb128996bd53b724a4854 columnFamilyName cf 2024-12-09T11:02:27,381 INFO [StoreOpener-618772b2136080eaf54ed02f04247e75-1 {}] regionserver.HStore(327): Store=618772b2136080eaf54ed02f04247e75/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:02:27,381 DEBUG [StoreOpener-770bc39038dbb128996bd53b724a4854-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:27,381 INFO [StoreOpener-770bc39038dbb128996bd53b724a4854-1 {}] regionserver.HStore(327): Store=770bc39038dbb128996bd53b724a4854/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:02:27,381 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1038): replaying wal for 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,382 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1038): replaying wal for 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,382 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,382 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,382 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,382 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1048): stopping wal replay for 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,382 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1060): Cleaning up temporary data for 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,383 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,383 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1048): stopping wal replay for 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,383 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1060): Cleaning up temporary data for 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,384 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 
{event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1093): writing seq id for 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,384 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1093): writing seq id for 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,385 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:02:27,386 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1114): Opened 618772b2136080eaf54ed02f04247e75; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70871340, jitterRate=0.056065261363983154}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:02:27,386 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,386 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:02:27,386 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1006): Region open journal for 618772b2136080eaf54ed02f04247e75: Running coprocessor pre-open hook at 1733742147378Writing region info on filesystem at 1733742147378Initializing all the Stores at 1733742147378Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742147378Cleaning up temporary data from old regions at 1733742147382 (+4 ms)Running coprocessor post-open hooks at 1733742147386 (+4 ms)Region opened successfully at 1733742147386 2024-12-09T11:02:27,386 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1114): Opened 770bc39038dbb128996bd53b724a4854; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74478842, jitterRate=0.10982123017311096}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:02:27,386 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,386 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1006): Region open journal for 770bc39038dbb128996bd53b724a4854: Running coprocessor pre-open hook at 1733742147379Writing region info on filesystem at 
1733742147379Initializing all the Stores at 1733742147379Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742147379Cleaning up temporary data from old regions at 1733742147383 (+4 ms)Running coprocessor post-open hooks at 1733742147386 (+3 ms)Region opened successfully at 1733742147386 2024-12-09T11:02:27,387 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854., pid=220, masterSystemTime=1733742147376 2024-12-09T11:02:27,387 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75., pid=219, masterSystemTime=1733742147374 2024-12-09T11:02:27,389 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:02:27,389 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:02:27,389 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=217 updating hbase:meta row=770bc39038dbb128996bd53b724a4854, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:02:27,389 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 2024-12-09T11:02:27,389 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 
2024-12-09T11:02:27,390 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=218 updating hbase:meta row=618772b2136080eaf54ed02f04247e75, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:02:27,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=220, ppid=217, state=RUNNABLE, hasLock=false; OpenRegionProcedure 770bc39038dbb128996bd53b724a4854, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T11:02:27,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=219, ppid=218, state=RUNNABLE, hasLock=false; OpenRegionProcedure 618772b2136080eaf54ed02f04247e75, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:02:27,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=220, resume processing ppid=217 2024-12-09T11:02:27,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=217, state=SUCCESS, hasLock=false; OpenRegionProcedure 770bc39038dbb128996bd53b724a4854, server=3469f9ca0af3,33293,1733741767044 in 169 msec 2024-12-09T11:02:27,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=216, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=770bc39038dbb128996bd53b724a4854, ASSIGN in 326 msec 2024-12-09T11:02:27,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=219, resume processing ppid=218 2024-12-09T11:02:27,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; OpenRegionProcedure 618772b2136080eaf54ed02f04247e75, server=3469f9ca0af3,39691,1733741766880 in 171 msec 2024-12-09T11:02:27,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=218, resume processing ppid=216 2024-12-09T11:02:27,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, ppid=216, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=618772b2136080eaf54ed02f04247e75, ASSIGN in 327 msec 2024-12-09T11:02:27,397 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=216, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:02:27,397 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742147397"}]},"ts":"1733742147397"} 2024-12-09T11:02:27,399 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-09T11:02:27,399 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=216, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:02:27,400 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-09T11:02:27,402 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 
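The CreateTableProcedure post-operation above stores the owner's permissions (jenkins: RWXCA) for the new table. As a hedged illustration, the client-side equivalent using AccessControlClient is sketched below; the code path actually logged here runs inside the master through the AccessController coprocessor and PermissionStorage, so this is not the procedure's own implementation:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissionsSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Grant the full RWXCA set to user "jenkins" on the table,
      // across all families and qualifiers (null, null).
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportWithChecksum"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}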
2024-12-09T11:02:27,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:27,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:27,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:27,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:02:27,406 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:27,406 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:27,407 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:27,407 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:27,407 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:27,407 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:27,407 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:27,407 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-09T11:02:27,407 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 364 msec 2024-12-09T11:02:27,672 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=216 2024-12-09T11:02:27,672 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-09T11:02:27,673 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T11:02:27,675 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-09T11:02:27,675 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:02:27,675 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:02:27,677 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T11:02:27,681 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T11:02:27,686 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T11:02:27,688 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-09T11:02:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742147688 (current time:1733742147688). 
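MasterRpcServices has just received a FLUSH-type snapshot request for emptySnaptb0-testExportWithChecksum. A minimal sketch of issuing the same request from a client, assuming the standard synchronous Admin.snapshot API (which blocks by polling "is procedure done", matching the MasterRpcServices(1377) checks seen in this log); the connection setup is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Issues a FLUSH-type snapshot request like the one logged above and
      // waits until the master-side SnapshotProcedure completes.
      admin.snapshot(new SnapshotDescription(
          "emptySnaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"),
          SnapshotType.FLUSH));
    }
  }
}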
2024-12-09T11:02:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:02:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-09T11:02:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:02:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d013435, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:27,690 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:27,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:27,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:27,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798a0fe3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:27,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:27,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:27,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:27,692 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34368, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:27,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9975185, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:27,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:27,693 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:27,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:27,694 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41288, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:27,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 2024-12-09T11:02:27,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:27,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:27,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:27,696 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T11:02:27,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@303a50d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:27,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:27,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:27,697 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:27,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:27,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:27,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@227dae26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:27,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:27,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:27,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:27,698 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34386, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:27,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25f44191, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:27,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:27,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:27,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:27,701 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41304, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T11:02:27,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:02:27,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:27,703 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50732, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:27,704 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815. 2024-12-09T11:02:27,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:27,704 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:27,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:27,704 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:02:27,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-09T11:02:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T11:02:27,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-09T11:02:27,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-09T11:02:27,706 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:02:27,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-09T11:02:27,707 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:02:27,709 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:02:27,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742331_1507 (size=161) 2024-12-09T11:02:27,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742331_1507 (size=161) 2024-12-09T11:02:27,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742331_1507 (size=161) 2024-12-09T11:02:27,715 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:02:27,715 INFO [PEWorker-2 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 770bc39038dbb128996bd53b724a4854}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 618772b2136080eaf54ed02f04247e75}] 2024-12-09T11:02:27,716 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,716 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,737 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-09T11:02:27,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-09T11:02:27,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-12-09T11:02:27,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-12-09T11:02:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:02:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 2024-12-09T11:02:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for 770bc39038dbb128996bd53b724a4854: 2024-12-09T11:02:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for 618772b2136080eaf54ed02f04247e75: 2024-12-09T11:02:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. for emptySnaptb0-testExportWithChecksum completed. 2024-12-09T11:02:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. for emptySnaptb0-testExportWithChecksum completed. 2024-12-09T11:02:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-09T11:02:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-09T11:02:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T11:02:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T11:02:27,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742333_1509 (size=68) 2024-12-09T11:02:27,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742332_1508 (size=68) 2024-12-09T11:02:27,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742333_1509 (size=68) 2024-12-09T11:02:27,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742332_1508 (size=68) 2024-12-09T11:02:27,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:02:27,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742333_1509 (size=68) 2024-12-09T11:02:27,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742332_1508 (size=68) 2024-12-09T11:02:27,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-12-09T11:02:27,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 
2024-12-09T11:02:27,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-12-09T11:02:27,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-12-09T11:02:27,876 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-12-09T11:02:27,876 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,876 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:27,876 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:27,878 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 770bc39038dbb128996bd53b724a4854 in 162 msec 2024-12-09T11:02:27,879 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=223, resume processing ppid=221 2024-12-09T11:02:27,879 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 618772b2136080eaf54ed02f04247e75 in 162 msec 2024-12-09T11:02:27,879 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:02:27,879 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:02:27,880 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
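The per-region SnapshotRegionProcedure subtasks have completed; the parent SnapshotProcedure continues below through the MOB-region, consolidate, verify and complete states. A minimal sketch, assuming the standard Admin API, of how a client could confirm the finished snapshot afterwards; the name pattern and printing are illustrative:

import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The snapshot becomes listable once the procedure reaches
      // SNAPSHOT_COMPLETE_SNAPSHOT and moves it out of .hbase-snapshot/.tmp.
      List<SnapshotDescription> snapshots =
          admin.listSnapshots(Pattern.compile(".*testExportWithChecksum.*"));
      for (SnapshotDescription s : snapshots) {
        System.out.println(s.getName() + " on " + s.getTableName());
      }
    }
  }
}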
2024-12-09T11:02:27,880 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:02:27,880 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:27,881 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-09T11:02:27,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742334_1510 (size=60) 2024-12-09T11:02:27,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742334_1510 (size=60) 2024-12-09T11:02:27,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742334_1510 (size=60) 2024-12-09T11:02:27,888 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:02:27,889 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-09T11:02:27,889 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-09T11:02:27,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742335_1511 (size=641) 2024-12-09T11:02:27,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742335_1511 (size=641) 2024-12-09T11:02:27,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742335_1511 (size=641) 2024-12-09T11:02:27,901 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:02:27,905 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:02:27,905 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-09T11:02:27,906 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:02:27,906 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-09T11:02:27,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 201 msec 2024-12-09T11:02:28,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-09T11:02:28,023 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-09T11:02:28,028 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:02:28,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39691 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:02:28,029 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T11:02:28,032 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-09T11:02:28,032 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 
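The SNAPSHOT operation for emptySnaptb0-testExportWithChecksum has completed, and the test now writes rows with the WAL disabled. A minimal sketch of a client write that produces the same "with WAL disabled" warning, assuming Durability.SKIP_WAL on the Put; the row key, qualifier and value are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
      Put put = new Put(Bytes.toBytes("row-0"))  // hypothetical row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL is what triggers the HRegion(8528) warning in the log:
      // data may be lost if the region server crashes before a flush.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}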
2024-12-09T11:02:28,032 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:02:28,033 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T11:02:28,037 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T11:02:28,041 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T11:02:28,043 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-09T11:02:28,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742148043 (current time:1733742148043). 2024-12-09T11:02:28,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:02:28,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-09T11:02:28,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:02:28,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@458dac7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:28,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:28,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:28,044 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:28,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:28,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:28,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71732936, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-09T11:02:28,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:28,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:28,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:28,046 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34402, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:28,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15a2644a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:28,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:28,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:28,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:28,048 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41314, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:28,049 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T11:02:28,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:28,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:28,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:28,049 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:02:28,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4da9bc82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:28,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:02:28,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:02:28,050 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:02:28,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:02:28,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:02:28,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39eda637, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:28,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:02:28,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:02:28,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:28,051 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34420, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:02:28,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24a9ae2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:02:28,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:02:28,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:02:28,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:28,053 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41318, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:28,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:02:28,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:02:28,056 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50744, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:02:28,057 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
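For context, the "Stopping rpc client" and AsyncConnectionImpl close entries around here are the normal teardown of short-lived HBase connections. A minimal client-side sketch (not part of this log; the class name is illustrative) of the create/close cycle that produces this kind of DEBUG output when client logging is at DEBUG:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ConnectionLifecycleSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // use conn.getAdmin() / conn.getTable(...) here
        }                                                           // close() runs the connection-close path logged above
      }
    }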
2024-12-09T11:02:28,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:02:28,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:28,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:02:28,057 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:02:28,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-09T11:02:28,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
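The procedure entries that follow record the master executing a FLUSH snapshot of testtb-testExportWithChecksum named snaptb0-testExportWithChecksum. A minimal sketch (not the test's own code; it assumes a reachable cluster configuration on the classpath) of the client call that requests such a snapshot:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The default snapshot type is FLUSH: regions are flushed first and their
          // store files referenced by the snapshot, matching "type=FLUSH" in the log.
          admin.snapshot("snaptb0-testExportWithChecksum",
              TableName.valueOf("testtb-testExportWithChecksum"));
        }
      }
    }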
2024-12-09T11:02:28,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-09T11:02:28,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 224 2024-12-09T11:02:28,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-09T11:02:28,059 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:02:28,060 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:02:28,062 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:02:28,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742336_1512 (size=156) 2024-12-09T11:02:28,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742336_1512 (size=156) 2024-12-09T11:02:28,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742336_1512 (size=156) 2024-12-09T11:02:28,071 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:02:28,071 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 770bc39038dbb128996bd53b724a4854}, {pid=226, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 618772b2136080eaf54ed02f04247e75}] 2024-12-09T11:02:28,072 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=226, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:28,072 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=225, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:28,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-09T11:02:28,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=225 2024-12-09T11:02:28,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39691 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=226 2024-12-09T11:02:28,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 2024-12-09T11:02:28,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:02:28,224 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HRegion(2902): Flushing 618772b2136080eaf54ed02f04247e75 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-09T11:02:28,224 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HRegion(2902): Flushing 770bc39038dbb128996bd53b724a4854 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-09T11:02:28,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854 is 71, key is 068cfbb743de3f171d221c193b197648/cf:q/1733742148027/Put/seqid=0 2024-12-09T11:02:28,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75 is 71, key is 206d50c402756e05015f1569a24f8f31/cf:q/1733742148029/Put/seqid=0 2024-12-09T11:02:28,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742338_1514 (size=8032) 2024-12-09T11:02:28,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742338_1514 (size=8032) 2024-12-09T11:02:28,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742338_1514 (size=8032) 2024-12-09T11:02:28,255 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:28,261 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:28,262 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/.tmp/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2, store: [table=testtb-testExportWithChecksum family=cf region=618772b2136080eaf54ed02f04247e75] 2024-12-09T11:02:28,263 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/.tmp/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2 is 206, key is 16f06ef06308a7027e9570fb9f91eb7f2/cf:q/1733742148029/Put/seqid=0 2024-12-09T11:02:28,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742337_1513 (size=5242) 2024-12-09T11:02:28,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742337_1513 (size=5242) 2024-12-09T11:02:28,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742337_1513 (size=5242) 2024-12-09T11:02:28,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:28,281 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:28,283 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/.tmp/cf/95ac9f55b56a4cf58c26f866bfbb1128, store: [table=testtb-testExportWithChecksum family=cf region=770bc39038dbb128996bd53b724a4854] 2024-12-09T11:02:28,284 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/.tmp/cf/95ac9f55b56a4cf58c26f866bfbb1128 is 206, key is 029a6609caa60c0de0518afccc380a325/cf:q/1733742148027/Put/seqid=0 2024-12-09T11:02:28,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742339_1515 (size=14451) 2024-12-09T11:02:28,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742339_1515 (size=14451) 2024-12-09T11:02:28,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742339_1515 (size=14451) 2024-12-09T11:02:28,295 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/.tmp/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2 2024-12-09T11:02:28,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742340_1516 (size=6310) 2024-12-09T11:02:28,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742340_1516 (size=6310) 2024-12-09T11:02:28,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742340_1516 (size=6310) 2024-12-09T11:02:28,297 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/.tmp/cf/95ac9f55b56a4cf58c26f866bfbb1128 2024-12-09T11:02:28,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/.tmp/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2 2024-12-09T11:02:28,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/.tmp/cf/95ac9f55b56a4cf58c26f866bfbb1128 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf/95ac9f55b56a4cf58c26f866bfbb1128 2024-12-09T11:02:28,305 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2, entries=45, sequenceid=6, filesize=14.1 K 2024-12-09T11:02:28,306 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf/95ac9f55b56a4cf58c26f866bfbb1128, entries=5, sequenceid=6, filesize=6.2 K 2024-12-09T11:02:28,307 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 770bc39038dbb128996bd53b724a4854 in 83ms, sequenceid=6, compaction requested=false 2024-12-09T11:02:28,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HRegion(2603): Flush status journal for 770bc39038dbb128996bd53b724a4854: 2024-12-09T11:02:28,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. for snaptb0-testExportWithChecksum completed. 2024-12-09T11:02:28,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-09T11:02:28,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:28,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf/95ac9f55b56a4cf58c26f866bfbb1128] hfiles 2024-12-09T11:02:28,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf/95ac9f55b56a4cf58c26f866bfbb1128 for snapshot=snaptb0-testExportWithChecksum 2024-12-09T11:02:28,307 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 618772b2136080eaf54ed02f04247e75 in 83ms, sequenceid=6, compaction requested=false 2024-12-09T11:02:28,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HRegion(2603): Flush status journal for 618772b2136080eaf54ed02f04247e75: 2024-12-09T11:02:28,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. for snaptb0-testExportWithChecksum completed. 2024-12-09T11:02:28,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-09T11:02:28,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:02:28,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2] hfiles 2024-12-09T11:02:28,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2 for snapshot=snaptb0-testExportWithChecksum 2024-12-09T11:02:28,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742341_1517 (size=107) 2024-12-09T11:02:28,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742342_1518 (size=107) 2024-12-09T11:02:28,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742341_1517 (size=107) 2024-12-09T11:02:28,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742341_1517 (size=107) 2024-12-09T11:02:28,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742342_1518 (size=107) 2024-12-09T11:02:28,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742342_1518 (size=107) 2024-12-09T11:02:28,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:02:28,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=225 2024-12-09T11:02:28,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 
2024-12-09T11:02:28,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=226 2024-12-09T11:02:28,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=225 2024-12-09T11:02:28,316 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:28,316 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=225, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:28,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=226 2024-12-09T11:02:28,317 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:28,317 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=226, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:28,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 770bc39038dbb128996bd53b724a4854 in 247 msec 2024-12-09T11:02:28,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-12-09T11:02:28,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 618772b2136080eaf54ed02f04247e75 in 247 msec 2024-12-09T11:02:28,320 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:02:28,321 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:02:28,322 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T11:02:28,322 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:02:28,322 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:02:28,323 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854] hfiles 2024-12-09T11:02:28,323 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75 2024-12-09T11:02:28,323 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854 2024-12-09T11:02:28,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742343_1519 (size=291) 2024-12-09T11:02:28,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742343_1519 (size=291) 2024-12-09T11:02:28,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742343_1519 (size=291) 2024-12-09T11:02:28,332 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:02:28,332 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-09T11:02:28,333 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-09T11:02:28,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742344_1520 (size=951) 2024-12-09T11:02:28,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742344_1520 (size=951) 2024-12-09T11:02:28,344 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742344_1520 (size=951) 2024-12-09T11:02:28,347 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:02:28,358 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:02:28,358 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-09T11:02:28,360 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:02:28,360 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 224 2024-12-09T11:02:28,361 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 302 msec 2024-12-09T11:02:28,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-09T11:02:28,372 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-09T11:02:28,373 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373 2024-12-09T11:02:28,373 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373, srcFsUri=hdfs://localhost:35869, srcDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:02:28,410 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35869, inputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 
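The ExportSnapshot entries that begin here copy snaptb0-testExportWithChecksum from the source HDFS root to a local file:/// destination. A minimal sketch of driving the same tool programmatically (assumptions: the destination path is illustrative, not the test directory above, and a cluster configuration is available on the classpath):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export"   // target filesystem + root, as with tgtDir above
        });
        System.exit(rc);
      }
    }

The equivalent command-line form is the usual way this is run outside of tests, with the same -snapshot and -copy-to arguments.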
2024-12-09T11:02:28,410 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@1d575971, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-09T11:02:28,412 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T11:02:28,422 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-09T11:02:28,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:28,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:28,464 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:29,671 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-4008847110279349318.jar 2024-12-09T11:02:29,671 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:29,672 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:29,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-11912792060069045851.jar 2024-12-09T11:02:29,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:29,766 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:29,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:29,767 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:29,767 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:29,767 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:02:29,768 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T11:02:29,768 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T11:02:29,768 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T11:02:29,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T11:02:29,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T11:02:29,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T11:02:29,770 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T11:02:29,770 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T11:02:29,770 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T11:02:29,771 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T11:02:29,771 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T11:02:29,771 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:02:29,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:02:29,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:02:29,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:02:29,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:02:29,773 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:02:29,773 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:02:29,924 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742345_1521 (size=24020) 2024-12-09T11:02:29,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742345_1521 (size=24020) 2024-12-09T11:02:29,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742345_1521 (size=24020) 2024-12-09T11:02:30,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742346_1522 (size=443171) 2024-12-09T11:02:30,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742346_1522 (size=443171) 2024-12-09T11:02:30,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742346_1522 (size=443171) 2024-12-09T11:02:30,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742347_1523 (size=77755) 2024-12-09T11:02:30,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742347_1523 (size=77755) 2024-12-09T11:02:30,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742347_1523 (size=77755) 2024-12-09T11:02:30,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742348_1524 (size=131360) 2024-12-09T11:02:30,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742348_1524 (size=131360) 2024-12-09T11:02:30,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742348_1524 (size=131360) 2024-12-09T11:02:30,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742349_1525 (size=111793) 2024-12-09T11:02:30,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742349_1525 (size=111793) 2024-12-09T11:02:30,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742349_1525 (size=111793) 2024-12-09T11:02:30,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742350_1526 (size=1832290) 2024-12-09T11:02:30,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742350_1526 (size=1832290) 2024-12-09T11:02:30,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742350_1526 (size=1832290) 2024-12-09T11:02:30,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742351_1527 (size=8360282) 2024-12-09T11:02:30,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742351_1527 (size=8360282) 2024-12-09T11:02:30,157 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742351_1527 (size=8360282) 2024-12-09T11:02:30,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742352_1528 (size=503880) 2024-12-09T11:02:30,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742352_1528 (size=503880) 2024-12-09T11:02:30,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742352_1528 (size=503880) 2024-12-09T11:02:30,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742353_1529 (size=322274) 2024-12-09T11:02:30,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742353_1529 (size=322274) 2024-12-09T11:02:30,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742353_1529 (size=322274) 2024-12-09T11:02:30,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742354_1530 (size=20406) 2024-12-09T11:02:30,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742354_1530 (size=20406) 2024-12-09T11:02:30,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742354_1530 (size=20406) 2024-12-09T11:02:30,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742355_1531 (size=45609) 2024-12-09T11:02:30,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742355_1531 (size=45609) 2024-12-09T11:02:30,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742355_1531 (size=45609) 2024-12-09T11:02:30,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742356_1532 (size=136454) 2024-12-09T11:02:30,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742356_1532 (size=136454) 2024-12-09T11:02:30,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742356_1532 (size=136454) 2024-12-09T11:02:30,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742357_1533 (size=1597136) 2024-12-09T11:02:30,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742357_1533 (size=1597136) 2024-12-09T11:02:30,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742357_1533 (size=1597136) 2024-12-09T11:02:30,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742358_1534 (size=30873) 2024-12-09T11:02:30,432 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742358_1534 (size=30873) 2024-12-09T11:02:30,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742358_1534 (size=30873) 2024-12-09T11:02:30,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742359_1535 (size=29229) 2024-12-09T11:02:30,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742359_1535 (size=29229) 2024-12-09T11:02:30,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742359_1535 (size=29229) 2024-12-09T11:02:30,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742360_1536 (size=903861) 2024-12-09T11:02:30,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742360_1536 (size=903861) 2024-12-09T11:02:30,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742360_1536 (size=903861) 2024-12-09T11:02:30,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742361_1537 (size=5175431) 2024-12-09T11:02:30,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742361_1537 (size=5175431) 2024-12-09T11:02:30,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742361_1537 (size=5175431) 2024-12-09T11:02:30,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742362_1538 (size=232881) 2024-12-09T11:02:30,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742362_1538 (size=232881) 2024-12-09T11:02:30,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742362_1538 (size=232881) 2024-12-09T11:02:30,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742363_1539 (size=1323991) 2024-12-09T11:02:30,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742363_1539 (size=1323991) 2024-12-09T11:02:30,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742363_1539 (size=1323991) 2024-12-09T11:02:30,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742364_1540 (size=4695811) 2024-12-09T11:02:30,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742364_1540 (size=4695811) 2024-12-09T11:02:30,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742364_1540 (size=4695811) 
2024-12-09T11:02:30,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742365_1541 (size=1877034) 2024-12-09T11:02:30,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742365_1541 (size=1877034) 2024-12-09T11:02:30,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742365_1541 (size=1877034) 2024-12-09T11:02:30,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742366_1542 (size=6425021) 2024-12-09T11:02:30,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742366_1542 (size=6425021) 2024-12-09T11:02:30,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742366_1542 (size=6425021) 2024-12-09T11:02:30,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742367_1543 (size=217555) 2024-12-09T11:02:30,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742367_1543 (size=217555) 2024-12-09T11:02:30,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742367_1543 (size=217555) 2024-12-09T11:02:31,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742368_1544 (size=4188619) 2024-12-09T11:02:31,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742368_1544 (size=4188619) 2024-12-09T11:02:31,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742368_1544 (size=4188619) 2024-12-09T11:02:31,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742369_1545 (size=127628) 2024-12-09T11:02:31,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742369_1545 (size=127628) 2024-12-09T11:02:31,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742369_1545 (size=127628) 2024-12-09T11:02:31,065 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
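The JobResourceUploader warning just above ("No job jar file set ... See Job or Job#setJar(String)") points at the MapReduce Job API; in this mini-cluster test no job jar is set, hence the WARN. A minimal sketch of how a driver normally attaches the job jar, using a hypothetical driver class that is not part of this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    // Hypothetical driver, shown only to illustrate the calls the warning refers to.
    public class ExportJobDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "snapshot-export");
        // Either name the jar explicitly ...
        // job.setJar("/path/to/job.jar");
        // ... or let Hadoop locate the jar that contains this class.
        job.setJarByClass(ExportJobDriver.class);
      }
    }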
2024-12-09T11:02:31,071 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-09T11:02:31,078 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.1 K 2024-12-09T11:02:31,078 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-09T11:02:31,078 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.2 K 2024-12-09T11:02:31,078 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-09T11:02:31,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742370_1546 (size=1023) 2024-12-09T11:02:31,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742370_1546 (size=1023) 2024-12-09T11:02:31,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742370_1546 (size=1023) 2024-12-09T11:02:31,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742371_1547 (size=35) 2024-12-09T11:02:31,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742371_1547 (size=35) 2024-12-09T11:02:31,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742371_1547 (size=35) 2024-12-09T11:02:31,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742372_1548 (size=304125) 2024-12-09T11:02:31,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742372_1548 (size=304125) 2024-12-09T11:02:31,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742372_1548 (size=304125) 2024-12-09T11:02:31,356 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T11:02:31,356 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T11:02:31,362 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0008_000001 (auth:SIMPLE) from 127.0.0.1:51044 2024-12-09T11:02:31,383 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0008/container_1733741775522_0008_01_000001/launch_container.sh] 2024-12-09T11:02:31,383 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0008/container_1733741775522_0008_01_000001/container_tokens] 2024-12-09T11:02:31,383 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0008/container_1733741775522_0008_01_000001/sysfs] 2024-12-09T11:02:31,839 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:56206 2024-12-09T11:02:32,273 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:02:34,503 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
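The "Loading Snapshot ... hfile list" / "export split=" lines above and the checksum-mismatch errors that follow all come from the ExportSnapshot copy job the test drives through ToolRunner (see the ToolRunner/AbstractHBaseTool frames in the final stack trace). The error text itself names the two knobs for copies between different filesystem types; a hedged sketch of passing them programmatically, with a placeholder destination URI (only the snapshot name and option names are taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    // Illustrative only; mirrors how the test invokes ExportSnapshot via ToolRunner.
    public class ExportWithCompositeCrc {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // File-level checksum comparison when source and target filesystems differ
        // (option named in the checksum-mismatch error text below).
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/snapshot-export"   // placeholder destination
            // "-no-checksum-verify"  // alternative from the error text: skip verification,
            //                        // at the risk of masking corruption during transfer
        });
        System.exit(rc);
      }
    }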
2024-12-09T11:02:36,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-09T11:02:36,469 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-09T11:02:36,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-09T11:02:40,143 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:38150 2024-12-09T11:02:40,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742373_1549 (size=349823) 2024-12-09T11:02:40,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742373_1549 (size=349823) 2024-12-09T11:02:40,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742373_1549 (size=349823) 2024-12-09T11:02:41,972 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:02:42,470 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 33657 2024-12-09T11:02:42,536 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:51756 2024-12-09T11:02:42,540 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:37268 2024-12-09T11:02:43,256 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:37284 2024-12-09T11:02:43,258 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:51760 2024-12-09T11:02:45,382 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0009_01_000006 while processing FINISH_CONTAINERS event 2024-12-09T11:02:48,568 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region cca3861865154a4d1c7857f88e2ede7a, had cached 0 bytes from a total of 6484 2024-12-09T11:02:48,568 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f33da0e723b1140f1a0ea77cd0d168fd, had cached 0 bytes from a total of 14067 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75 and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T11:02:52,323 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:38752 2024-12-09T11:02:53,310 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:38762 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/archive/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T11:02:54,327 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:45800 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf/95ac9f55b56a4cf58c26f866bfbb1128 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/archive/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf/95ac9f55b56a4cf58c26f866bfbb1128. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T11:02:54,579 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0009_01_000009 while processing FINISH_CONTAINERS event 2024-12-09T11:02:55,337 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:45816 2024-12-09T11:02:55,866 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000003/launch_container.sh] 2024-12-09T11:02:55,866 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000003/container_tokens] 2024-12-09T11:02:55,867 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000003/sysfs] 2024-12-09T11:02:56,749 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000005/launch_container.sh] 2024-12-09T11:02:56,749 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000005/container_tokens] 2024-12-09T11:02:56,750 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned 
false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000005/sysfs] 2024-12-09T11:02:56,767 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0009_01_000012 while processing FINISH_CONTAINERS event 2024-12-09T11:02:57,771 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0009_01_000013 while processing FINISH_CONTAINERS event 2024-12-09T11:02:58,860 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000002/launch_container.sh] 2024-12-09T11:02:58,861 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000002/container_tokens] 2024-12-09T11:02:58,861 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000002/sysfs] 2024-12-09T11:02:58,951 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000004/launch_container.sh] 2024-12-09T11:02:58,951 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000004/container_tokens] 2024-12-09T11:02:58,951 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000004/sysfs] 2024-12-09T11:03:00,293 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000007/launch_container.sh] 2024-12-09T11:03:00,293 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000007/container_tokens] 2024-12-09T11:03:00,293 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000007/sysfs] 2024-12-09T11:03:00,551 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000008/launch_container.sh] 2024-12-09T11:03:00,551 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000008/container_tokens] 2024-12-09T11:03:00,551 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000008/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. 
(NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T11:03:01,371 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:37524 2024-12-09T11:03:01,594 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000010/launch_container.sh] 2024-12-09T11:03:01,594 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000010/container_tokens] 2024-12-09T11:03:01,595 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000010/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/archive/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T11:03:02,344 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000011/launch_container.sh] 2024-12-09T11:03:02,344 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000011/container_tokens] 2024-12-09T11:03:02,344 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000011/sysfs] 2024-12-09T11:03:02,383 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:37532 2024-12-09T11:03:02,388 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:53164 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf/95ac9f55b56a4cf58c26f866bfbb1128 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/archive/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf/95ac9f55b56a4cf58c26f866bfbb1128. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. 
(NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T11:03:03,400 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:37534 2024-12-09T11:03:04,503 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:03:05,315 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0009_01_000018 while processing FINISH_CONTAINERS event 2024-12-09T11:03:05,519 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0009_01_000019 while processing FINISH_CONTAINERS event 2024-12-09T11:03:06,665 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000014/launch_container.sh] 2024-12-09T11:03:06,665 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000014/container_tokens] 2024-12-09T11:03:06,665 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000014/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75 and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T11:03:08,423 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:37544 2024-12-09T11:03:09,523 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000015/launch_container.sh] 2024-12-09T11:03:09,523 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000015/container_tokens] 2024-12-09T11:03:09,523 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000015/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2 and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/archive/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T11:03:10,451 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:54310 2024-12-09T11:03:10,451 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:40914 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf/95ac9f55b56a4cf58c26f866bfbb1128 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/local-export-1733742148373/archive/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf/95ac9f55b56a4cf58c26f866bfbb1128. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T11:03:10,775 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000017/launch_container.sh] 2024-12-09T11:03:10,775 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000017/container_tokens] 2024-12-09T11:03:10,775 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000017/sysfs] 2024-12-09T11:03:12,378 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 618772b2136080eaf54ed02f04247e75, had cached 0 bytes from a total of 14451 2024-12-09T11:03:12,379 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 770bc39038dbb128996bd53b724a4854, had cached 0 bytes from a total of 6310 2024-12-09T11:03:12,472 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:40922 2024-12-09T11:03:14,199 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 770bc39038dbb128996bd53b724a4854 changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:03:14,200 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 618772b2136080eaf54ed02f04247e75 changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:03:14,438 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000016/launch_container.sh] 2024-12-09T11:03:14,438 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000016/container_tokens] 2024-12-09T11:03:14,438 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000016/sysfs] 2024-12-09T11:03:15,157 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:40924 2024-12-09T11:03:15,158 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:40934 2024-12-09T11:03:15,161 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:54316 2024-12-09T11:03:15,318 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733741775522_0009_01_000023 is : 143 2024-12-09T11:03:15,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742374_1550 (size=49325) 2024-12-09T11:03:15,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742374_1550 (size=49325) 2024-12-09T11:03:15,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742374_1550 (size=49325) 2024-12-09T11:03:15,350 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000023/launch_container.sh] 2024-12-09T11:03:15,350 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000023/container_tokens] 2024-12-09T11:03:15,350 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000023/sysfs] 2024-12-09T11:03:15,363 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733741775522_0009_01_000022 is : 143 2024-12-09T11:03:15,367 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733741775522_0009_01_000021 is : 143 2024-12-09T11:03:15,377 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000021/launch_container.sh] 2024-12-09T11:03:15,377 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000021/container_tokens] 2024-12-09T11:03:15,377 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000021/sysfs] 2024-12-09T11:03:15,391 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000022/launch_container.sh] 2024-12-09T11:03:15,391 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000022/container_tokens] 2024-12-09T11:03:15,391 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000022/sysfs] 2024-12-09T11:03:15,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742375_1551 (size=461) 2024-12-09T11:03:15,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742375_1551 (size=461) 2024-12-09T11:03:15,396 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742375_1551 (size=461) 2024-12-09T11:03:15,417 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000020/launch_container.sh] 2024-12-09T11:03:15,417 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000020/container_tokens] 2024-12-09T11:03:15,417 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_1/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000020/sysfs] 2024-12-09T11:03:15,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742376_1552 (size=49325) 2024-12-09T11:03:15,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742376_1552 (size=49325) 2024-12-09T11:03:15,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742376_1552 (size=49325) 2024-12-09T11:03:15,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742377_1553 (size=349823) 2024-12-09T11:03:15,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742377_1553 (size=349823) 2024-12-09T11:03:15,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742377_1553 (size=349823) 2024-12-09T11:03:16,782 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733741775522_0009_m_000001 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] 
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T11:03:16,784 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742196783 2024-12-09T11:03:16,784 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35869, tgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742196783, rawTgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742196783, srcFsUri=hdfs://localhost:35869, srcDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:03:16,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a0d08af07fc0beaa578cbd208923b1fb 1/1 column families, dataSize=1.47 KB heapSize=3.49 KB 2024-12-09T11:03:16,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb/.tmp/l/8558c225227440179111c48144d5a91b is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733742120418/DeleteFamily/seqid=0 2024-12-09T11:03:16,821 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35869, inputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:03:16,821 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742196783, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742196783/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-09T11:03:16,823 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
2024-12-09T11:03:16,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742378_1554 (size=5791) 2024-12-09T11:03:16,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742378_1554 (size=5791) 2024-12-09T11:03:16,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742378_1554 (size=5791) 2024-12-09T11:03:16,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.47 KB at sequenceid=28 (bloomFilter=false), to=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb/.tmp/l/8558c225227440179111c48144d5a91b 2024-12-09T11:03:16,828 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742196783/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-09T11:03:16,832 INFO [MemStoreFlusher.0 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8558c225227440179111c48144d5a91b 2024-12-09T11:03:16,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb/.tmp/l/8558c225227440179111c48144d5a91b as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb/l/8558c225227440179111c48144d5a91b 2024-12-09T11:03:16,838 INFO [MemStoreFlusher.0 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8558c225227440179111c48144d5a91b 2024-12-09T11:03:16,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742379_1555 (size=951) 2024-12-09T11:03:16,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742379_1555 (size=951) 2024-12-09T11:03:16,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb/l/8558c225227440179111c48144d5a91b, entries=13, sequenceid=28, filesize=5.7 K 2024-12-09T11:03:16,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742379_1555 (size=951) 2024-12-09T11:03:16,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.47 KB/1504, heapSize ~3.48 KB/3560, currentSize=0 B/0 for a0d08af07fc0beaa578cbd208923b1fb in 35ms, sequenceid=28, compaction requested=false 2024-12-09T11:03:16,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a0d08af07fc0beaa578cbd208923b1fb: 2024-12-09T11:03:16,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742380_1556 (size=156) 2024-12-09T11:03:16,847 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742380_1556 (size=156) 2024-12-09T11:03:16,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:16,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:16,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:16,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742380_1556 (size=156) 2024-12-09T11:03:17,919 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-4497906511996079235.jar 2024-12-09T11:03:17,920 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:17,920 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:18,003 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-11208084436712758963.jar 2024-12-09T11:03:18,003 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:18,004 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:18,004 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:18,004 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:18,004 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:18,005 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:18,005 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T11:03:18,005 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T11:03:18,006 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T11:03:18,006 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T11:03:18,006 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T11:03:18,007 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T11:03:18,007 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T11:03:18,007 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T11:03:18,007 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T11:03:18,008 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T11:03:18,008 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T11:03:18,008 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:03:18,009 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:03:18,009 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:03:18,009 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:03:18,009 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:03:18,010 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:03:18,010 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:03:18,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742381_1557 (size=24020) 2024-12-09T11:03:18,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742381_1557 (size=24020) 2024-12-09T11:03:18,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742381_1557 (size=24020) 2024-12-09T11:03:18,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742382_1558 (size=77755) 2024-12-09T11:03:18,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742382_1558 (size=77755) 2024-12-09T11:03:18,082 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742382_1558 (size=77755) 2024-12-09T11:03:18,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742383_1559 (size=131360) 2024-12-09T11:03:18,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742383_1559 (size=131360) 2024-12-09T11:03:18,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742383_1559 (size=131360) 2024-12-09T11:03:18,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742384_1560 (size=111793) 2024-12-09T11:03:18,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742384_1560 (size=111793) 2024-12-09T11:03:18,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742384_1560 (size=111793) 2024-12-09T11:03:18,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742385_1561 (size=1832290) 2024-12-09T11:03:18,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742385_1561 (size=1832290) 2024-12-09T11:03:18,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742385_1561 (size=1832290) 2024-12-09T11:03:18,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742386_1562 (size=8360282) 2024-12-09T11:03:18,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742386_1562 (size=8360282) 2024-12-09T11:03:18,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742386_1562 (size=8360282) 2024-12-09T11:03:18,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742387_1563 (size=503880) 2024-12-09T11:03:18,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742387_1563 (size=503880) 2024-12-09T11:03:18,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742387_1563 (size=503880) 2024-12-09T11:03:18,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742388_1564 (size=322274) 2024-12-09T11:03:18,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742388_1564 (size=322274) 2024-12-09T11:03:18,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742388_1564 (size=322274) 2024-12-09T11:03:18,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742389_1565 (size=20406) 2024-12-09T11:03:18,655 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742389_1565 (size=20406) 2024-12-09T11:03:18,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742389_1565 (size=20406) 2024-12-09T11:03:18,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742390_1566 (size=45609) 2024-12-09T11:03:18,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742390_1566 (size=45609) 2024-12-09T11:03:18,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742390_1566 (size=45609) 2024-12-09T11:03:18,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742391_1567 (size=136454) 2024-12-09T11:03:18,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742391_1567 (size=136454) 2024-12-09T11:03:18,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742391_1567 (size=136454) 2024-12-09T11:03:18,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742392_1568 (size=1597136) 2024-12-09T11:03:18,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742392_1568 (size=1597136) 2024-12-09T11:03:18,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742392_1568 (size=1597136) 2024-12-09T11:03:18,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742393_1569 (size=30873) 2024-12-09T11:03:18,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742393_1569 (size=30873) 2024-12-09T11:03:18,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742393_1569 (size=30873) 2024-12-09T11:03:18,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742394_1570 (size=29229) 2024-12-09T11:03:18,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742394_1570 (size=29229) 2024-12-09T11:03:18,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742394_1570 (size=29229) 2024-12-09T11:03:18,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742395_1571 (size=443171) 2024-12-09T11:03:18,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742395_1571 (size=443171) 2024-12-09T11:03:18,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742395_1571 (size=443171) 2024-12-09T11:03:18,847 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742396_1572 (size=903861) 2024-12-09T11:03:18,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742396_1572 (size=903861) 2024-12-09T11:03:18,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742396_1572 (size=903861) 2024-12-09T11:03:18,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742397_1573 (size=5175431) 2024-12-09T11:03:18,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742397_1573 (size=5175431) 2024-12-09T11:03:18,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742397_1573 (size=5175431) 2024-12-09T11:03:18,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742398_1574 (size=232881) 2024-12-09T11:03:18,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742398_1574 (size=232881) 2024-12-09T11:03:18,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742398_1574 (size=232881) 2024-12-09T11:03:18,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742399_1575 (size=1323991) 2024-12-09T11:03:18,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742399_1575 (size=1323991) 2024-12-09T11:03:18,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742399_1575 (size=1323991) 2024-12-09T11:03:19,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742400_1576 (size=4695811) 2024-12-09T11:03:19,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742400_1576 (size=4695811) 2024-12-09T11:03:19,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742400_1576 (size=4695811) 2024-12-09T11:03:19,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742401_1577 (size=1877034) 2024-12-09T11:03:19,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742401_1577 (size=1877034) 2024-12-09T11:03:19,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742401_1577 (size=1877034) 2024-12-09T11:03:19,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742402_1578 (size=6425021) 2024-12-09T11:03:19,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742402_1578 (size=6425021) 2024-12-09T11:03:19,124 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742402_1578 (size=6425021) 2024-12-09T11:03:19,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742403_1579 (size=217555) 2024-12-09T11:03:19,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742403_1579 (size=217555) 2024-12-09T11:03:19,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742403_1579 (size=217555) 2024-12-09T11:03:19,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742404_1580 (size=4188619) 2024-12-09T11:03:19,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742404_1580 (size=4188619) 2024-12-09T11:03:19,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742404_1580 (size=4188619) 2024-12-09T11:03:19,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742405_1581 (size=127628) 2024-12-09T11:03:19,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742405_1581 (size=127628) 2024-12-09T11:03:19,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742405_1581 (size=127628) 2024-12-09T11:03:19,247 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-09T11:03:19,252 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-09T11:03:19,259 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.1 K 2024-12-09T11:03:19,259 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-09T11:03:19,259 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.2 K 2024-12-09T11:03:19,259 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-09T11:03:19,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742406_1582 (size=1023) 2024-12-09T11:03:19,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742406_1582 (size=1023) 2024-12-09T11:03:19,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742406_1582 (size=1023) 2024-12-09T11:03:19,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742407_1583 (size=35) 2024-12-09T11:03:19,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742407_1583 (size=35) 2024-12-09T11:03:19,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742407_1583 (size=35) 2024-12-09T11:03:19,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742408_1584 (size=304075) 2024-12-09T11:03:19,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742408_1584 (size=304075) 2024-12-09T11:03:19,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742408_1584 (size=304075) 2024-12-09T11:03:21,568 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T11:03:21,569 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T11:03:21,573 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0009_000001 (auth:SIMPLE) from 127.0.0.1:34930 2024-12-09T11:03:21,608 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000001/launch_container.sh] 2024-12-09T11:03:21,609 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000001/container_tokens] 2024-12-09T11:03:21,609 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0009/container_1733741775522_0009_01_000001/sysfs] 2024-12-09T11:03:22,384 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0010_000001 (auth:SIMPLE) from 127.0.0.1:53352 2024-12-09T11:03:27,471 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 33657 2024-12-09T11:03:27,595 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0010_000001 (auth:SIMPLE) from 127.0.0.1:47318 2024-12-09T11:03:27,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742409_1585 (size=349773) 2024-12-09T11:03:27,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742409_1585 (size=349773) 2024-12-09T11:03:27,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742409_1585 (size=349773) 2024-12-09T11:03:29,849 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0010_000001 (auth:SIMPLE) from 127.0.0.1:45014 2024-12-09T11:03:29,849 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0010_000001 (auth:SIMPLE) from 127.0.0.1:36876 2024-12-09T11:03:30,697 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0010_000001 (auth:SIMPLE) from 127.0.0.1:36890 2024-12-09T11:03:30,697 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0010_000001 (auth:SIMPLE) from 127.0.0.1:45028 2024-12-09T11:03:33,568 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region cca3861865154a4d1c7857f88e2ede7a, had cached 0 bytes from a total of 6484 
2024-12-09T11:03:33,568 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f33da0e723b1140f1a0ea77cd0d168fd, had cached 0 bytes from a total of 14067 2024-12-09T11:03:33,573 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0010_01_000006 while processing FINISH_CONTAINERS event 2024-12-09T11:03:34,510 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:03:35,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742410_1586 (size=14451) 2024-12-09T11:03:35,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742410_1586 (size=14451) 2024-12-09T11:03:35,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742410_1586 (size=14451) 2024-12-09T11:03:35,642 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000002/launch_container.sh] 2024-12-09T11:03:35,643 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000002/container_tokens] 2024-12-09T11:03:35,643 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000002/sysfs] 2024-12-09T11:03:37,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742412_1588 (size=8032) 2024-12-09T11:03:37,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742412_1588 (size=8032) 2024-12-09T11:03:37,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742412_1588 (size=8032) 2024-12-09T11:03:38,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742413_1589 (size=5242) 2024-12-09T11:03:38,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742413_1589 (size=5242) 2024-12-09T11:03:38,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742413_1589 (size=5242) 2024-12-09T11:03:38,132 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742414_1590 (size=6310) 2024-12-09T11:03:38,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742414_1590 (size=6310) 2024-12-09T11:03:38,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742414_1590 (size=6310) 2024-12-09T11:03:38,181 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000003/launch_container.sh] 2024-12-09T11:03:38,181 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000003/container_tokens] 2024-12-09T11:03:38,181 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000003/sysfs] 2024-12-09T11:03:38,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742411_1587 (size=31733) 2024-12-09T11:03:38,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742411_1587 (size=31733) 2024-12-09T11:03:38,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742411_1587 (size=31733) 2024-12-09T11:03:38,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742415_1591 (size=463) 2024-12-09T11:03:38,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742415_1591 (size=463) 2024-12-09T11:03:38,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742415_1591 (size=463) 2024-12-09T11:03:38,307 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000005/launch_container.sh] 2024-12-09T11:03:38,307 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000005/container_tokens] 2024-12-09T11:03:38,307 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000005/sysfs] 2024-12-09T11:03:38,348 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000004/launch_container.sh] 2024-12-09T11:03:38,348 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000004/container_tokens] 2024-12-09T11:03:38,348 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000004/sysfs] 2024-12-09T11:03:38,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742416_1592 (size=31733) 2024-12-09T11:03:38,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742416_1592 (size=31733) 2024-12-09T11:03:38,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742416_1592 (size=31733) 2024-12-09T11:03:38,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742417_1593 (size=349773) 2024-12-09T11:03:38,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742417_1593 (size=349773) 2024-12-09T11:03:38,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742417_1593 (size=349773) 2024-12-09T11:03:38,425 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0010_000001 (auth:SIMPLE) from 127.0.0.1:45038 2024-12-09T11:03:38,434 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0010_000001 (auth:SIMPLE) from 127.0.0.1:36904 2024-12-09T11:03:38,443 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0010_000001 
(auth:SIMPLE) from 127.0.0.1:45048 2024-12-09T11:03:39,620 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T11:03:39,621 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-09T11:03:39,627 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-09T11:03:39,627 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T11:03:39,627 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T11:03:39,627 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-09T11:03:39,628 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-09T11:03:39,628 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-09T11:03:39,628 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742196783/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742196783/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-09T11:03:39,628 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742196783/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-09T11:03:39,628 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742196783/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-09T11:03:39,633 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-09T11:03:39,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-09T11:03:39,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-09T11:03:39,636 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742219635"}]},"ts":"1733742219635"} 2024-12-09T11:03:39,637 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): 
Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-09T11:03:39,637 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-09T11:03:39,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-09T11:03:39,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=770bc39038dbb128996bd53b724a4854, UNASSIGN}, {pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=618772b2136080eaf54ed02f04247e75, UNASSIGN}] 2024-12-09T11:03:39,640 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=618772b2136080eaf54ed02f04247e75, UNASSIGN 2024-12-09T11:03:39,640 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=770bc39038dbb128996bd53b724a4854, UNASSIGN 2024-12-09T11:03:39,640 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=770bc39038dbb128996bd53b724a4854, regionState=CLOSING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:03:39,640 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=618772b2136080eaf54ed02f04247e75, regionState=CLOSING, regionLocation=3469f9ca0af3,39691,1733741766880 2024-12-09T11:03:39,642 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=618772b2136080eaf54ed02f04247e75, UNASSIGN because future has completed 2024-12-09T11:03:39,642 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:03:39,642 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=231, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure 618772b2136080eaf54ed02f04247e75, server=3469f9ca0af3,39691,1733741766880}] 2024-12-09T11:03:39,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=770bc39038dbb128996bd53b724a4854, UNASSIGN because future has completed 2024-12-09T11:03:39,643 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:03:39,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure 770bc39038dbb128996bd53b724a4854, 
server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T11:03:39,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-09T11:03:39,795 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(122): Close 618772b2136080eaf54ed02f04247e75 2024-12-09T11:03:39,795 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:03:39,795 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1722): Closing 618772b2136080eaf54ed02f04247e75, disabling compactions & flushes 2024-12-09T11:03:39,795 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 2024-12-09T11:03:39,795 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 2024-12-09T11:03:39,795 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. after waiting 0 ms 2024-12-09T11:03:39,795 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 2024-12-09T11:03:39,796 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(122): Close 770bc39038dbb128996bd53b724a4854 2024-12-09T11:03:39,796 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:03:39,796 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1722): Closing 770bc39038dbb128996bd53b724a4854, disabling compactions & flushes 2024-12-09T11:03:39,796 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:03:39,796 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:03:39,796 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. after waiting 0 ms 2024-12-09T11:03:39,796 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 
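
[Editor's note] The entries above record the server side of a table disable: the master stores a DisableTableProcedure (pid=227), spawns a CloseTableRegionsProcedure and per-region TransitRegionStateProcedure/CloseRegionProcedure children, and the region servers close each region. A minimal client-side sketch of the call that starts this flow, assuming an already-open Connection (the class and variable names here are placeholders, not taken from the test):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class DisableTableSketch {
  // Disables the table if it is currently enabled; disableTable() returns only
  // after the master's DisableTableProcedure and its region-close children
  // (the pid=227..232 chain logged above) have completed.
  static void disable(Connection conn, String table) throws Exception {
    TableName tn = TableName.valueOf(table);
    try (Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(tn)) {
        admin.disableTable(tn);
      }
    }
  }
}

The asynchronous variant, Admin.disableTableAsync, returns a future instead of blocking; the repeated "Checking to see if procedure is done pid=227" lines are the master answering that kind of completion poll.
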
2024-12-09T11:03:39,802 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:03:39,802 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:03:39,802 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:03:39,802 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:03:39,803 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854. 2024-12-09T11:03:39,803 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75. 2024-12-09T11:03:39,803 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1676): Region close journal for 770bc39038dbb128996bd53b724a4854: Waiting for close lock at 1733742219796Running coprocessor pre-close hooks at 1733742219796Disabling compacts and flushes for region at 1733742219796Disabling writes for close at 1733742219796Writing region close event to WAL at 1733742219797 (+1 ms)Running coprocessor post-close hooks at 1733742219802 (+5 ms)Closed at 1733742219803 (+1 ms) 2024-12-09T11:03:39,803 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1676): Region close journal for 618772b2136080eaf54ed02f04247e75: Waiting for close lock at 1733742219795Running coprocessor pre-close hooks at 1733742219795Disabling compacts and flushes for region at 1733742219795Disabling writes for close at 1733742219795Writing region close event to WAL at 1733742219796 (+1 ms)Running coprocessor post-close hooks at 1733742219802 (+6 ms)Closed at 1733742219803 (+1 ms) 2024-12-09T11:03:39,804 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(157): Closed 618772b2136080eaf54ed02f04247e75 2024-12-09T11:03:39,805 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=618772b2136080eaf54ed02f04247e75, regionState=CLOSED 2024-12-09T11:03:39,805 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(157): Closed 770bc39038dbb128996bd53b724a4854 2024-12-09T11:03:39,806 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=770bc39038dbb128996bd53b724a4854, regionState=CLOSED 2024-12-09T11:03:39,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=231, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure 618772b2136080eaf54ed02f04247e75, server=3469f9ca0af3,39691,1733741766880 because future has completed 2024-12-09T11:03:39,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure 770bc39038dbb128996bd53b724a4854, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T11:03:39,809 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=230 2024-12-09T11:03:39,810 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=229 2024-12-09T11:03:39,810 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=230, state=SUCCESS, hasLock=false; CloseRegionProcedure 618772b2136080eaf54ed02f04247e75, server=3469f9ca0af3,39691,1733741766880 in 166 msec 2024-12-09T11:03:39,810 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=229, state=SUCCESS, hasLock=false; CloseRegionProcedure 770bc39038dbb128996bd53b724a4854, server=3469f9ca0af3,33293,1733741767044 in 165 msec 2024-12-09T11:03:39,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=618772b2136080eaf54ed02f04247e75, UNASSIGN in 170 msec 2024-12-09T11:03:39,812 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=228 2024-12-09T11:03:39,812 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=770bc39038dbb128996bd53b724a4854, UNASSIGN in 171 msec 2024-12-09T11:03:39,814 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-12-09T11:03:39,814 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 174 msec 2024-12-09T11:03:39,815 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742219814"}]},"ts":"1733742219814"} 2024-12-09T11:03:39,816 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-09T11:03:39,816 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-09T11:03:39,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 183 msec 2024-12-09T11:03:39,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-09T11:03:39,952 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-09T11:03:39,952 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] 
master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-09T11:03:39,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-09T11:03:39,954 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-09T11:03:39,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-09T11:03:39,955 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=233, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-09T11:03:39,959 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-09T11:03:39,959 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854 2024-12-09T11:03:39,959 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75 2024-12-09T11:03:39,960 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/recovered.edits] 2024-12-09T11:03:39,960 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/recovered.edits] 2024-12-09T11:03:39,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T11:03:39,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T11:03:39,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T11:03:39,962 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T11:03:39,963 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-09T11:03:39,963 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-09T11:03:39,963 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-09T11:03:39,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T11:03:39,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T11:03:39,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:03:39,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:03:39,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T11:03:39,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:03:39,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=233 2024-12-09T11:03:39,965 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:03:39,965 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:03:39,965 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:03:39,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:03:39,966 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-12-09T11:03:39,966 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T11:03:39,967 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:03:39,967 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/cf/95d3a2dfd2ff44b6ba46f99bc9257fe2 2024-12-09T11:03:39,967 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf/95ac9f55b56a4cf58c26f866bfbb1128 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/cf/95ac9f55b56a4cf58c26f866bfbb1128 2024-12-09T11:03:39,970 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854/recovered.edits/9.seqid 2024-12-09T11:03:39,970 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75/recovered.edits/9.seqid 2024-12-09T11:03:39,970 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/770bc39038dbb128996bd53b724a4854 2024-12-09T11:03:39,970 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportWithChecksum/618772b2136080eaf54ed02f04247e75 2024-12-09T11:03:39,970 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-09T11:03:39,971 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 
2024-12-09T11:03:39,971 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-12-09T11:03:39,974 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202412092af6bd6e01344a5cbf1268c8beaa971c_618772b2136080eaf54ed02f04247e75 2024-12-09T11:03:39,975 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241209528ae9c724a845f2afcb59a9664a91ef_770bc39038dbb128996bd53b724a4854 2024-12-09T11:03:39,976 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-09T11:03:39,977 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=233, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-09T11:03:39,980 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-09T11:03:39,983 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-09T11:03:39,984 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=233, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-09T11:03:39,984 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 
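
[Editor's note] The DeleteTableProcedure entries above and immediately below show the filesystem side of a table drop: region and MOB directories are moved to the archive, the hbase:acl entry and its ZooKeeper node are removed, and the table's rows are deleted from hbase:meta. A sketch of the client calls behind this cleanup, including the snapshot deletions that appear a little further down in the log; names are placeholders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class DropTableSketch {
  // Drops an already-disabled table and removes the snapshots taken from it.
  // deleteTable() blocks until the DeleteTableProcedure (pid=233 above) is done;
  // deleteSnapshot() removes the snapshot manifest under .hbase-snapshot.
  static void drop(Connection conn, String table, String... snapshots) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.deleteTable(TableName.valueOf(table));
      for (String name : snapshots) {
        admin.deleteSnapshot(name);
      }
    }
  }
}
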
2024-12-09T11:03:39,984 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742219984"}]},"ts":"9223372036854775807"} 2024-12-09T11:03:39,984 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742219984"}]},"ts":"9223372036854775807"} 2024-12-09T11:03:39,986 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T11:03:39,986 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 770bc39038dbb128996bd53b724a4854, NAME => 'testtb-testExportWithChecksum,,1733742147042.770bc39038dbb128996bd53b724a4854.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 618772b2136080eaf54ed02f04247e75, NAME => 'testtb-testExportWithChecksum,1,1733742147042.618772b2136080eaf54ed02f04247e75.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T11:03:39,986 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 2024-12-09T11:03:39,986 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733742219986"}]},"ts":"9223372036854775807"} 2024-12-09T11:03:39,988 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-09T11:03:39,988 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=233, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-09T11:03:39,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 35 msec 2024-12-09T11:03:40,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=233 2024-12-09T11:03:40,072 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-12-09T11:03:40,072 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-09T11:03:40,077 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-09T11:03:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-09T11:03:40,080 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-12-09T11:03:40,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-09T11:03:40,102 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=824 (was 
825), OpenFileDescriptor=827 (was 833), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1007 (was 840) - SystemLoadAverage LEAK? -, ProcessCount=21 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=1819 (was 2337) 2024-12-09T11:03:40,102 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=824 is superior to 500 2024-12-09T11:03:40,119 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=824, OpenFileDescriptor=827, MaxFileDescriptor=1048576, SystemLoadAverage=1007, ProcessCount=21, AvailableMemoryMB=1818 2024-12-09T11:03:40,119 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=824 is superior to 500 2024-12-09T11:03:40,121 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:03:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:40,123 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:03:40,123 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 234 2024-12-09T11:03:40,123 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:03:40,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-09T11:03:40,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742418_1594 (size=454) 2024-12-09T11:03:40,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742418_1594 (size=454) 2024-12-09T11:03:40,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742418_1594 (size=454) 2024-12-09T11:03:40,136 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5c02cf3502bb3ec9f86f1dfde13ba523, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', 
{TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:03:40,136 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => de39d8387da850a6da12447220425587, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:03:40,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742420_1596 (size=79) 2024-12-09T11:03:40,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742419_1595 (size=79) 2024-12-09T11:03:40,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742419_1595 (size=79) 2024-12-09T11:03:40,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742420_1596 (size=79) 2024-12-09T11:03:40,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742420_1596 (size=79) 2024-12-09T11:03:40,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742419_1595 (size=79) 2024-12-09T11:03:40,148 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:03:40,148 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:03:40,148 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing 5c02cf3502bb3ec9f86f1dfde13ba523, disabling compactions & flushes 2024-12-09T11:03:40,148 DEBUG 
[RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing de39d8387da850a6da12447220425587, disabling compactions & flushes 2024-12-09T11:03:40,148 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 2024-12-09T11:03:40,148 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:03:40,148 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:03:40,148 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 2024-12-09T11:03:40,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. after waiting 0 ms 2024-12-09T11:03:40,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. after waiting 0 ms 2024-12-09T11:03:40,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:03:40,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 2024-12-09T11:03:40,149 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:03:40,149 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 
2024-12-09T11:03:40,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for de39d8387da850a6da12447220425587: Waiting for close lock at 1733742220148Disabling compacts and flushes for region at 1733742220148Disabling writes for close at 1733742220149 (+1 ms)Writing region close event to WAL at 1733742220149Closed at 1733742220149 2024-12-09T11:03:40,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5c02cf3502bb3ec9f86f1dfde13ba523: Waiting for close lock at 1733742220148Disabling compacts and flushes for region at 1733742220148Disabling writes for close at 1733742220149 (+1 ms)Writing region close event to WAL at 1733742220149Closed at 1733742220149 2024-12-09T11:03:40,150 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:03:40,150 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733742220150"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742220150"}]},"ts":"1733742220150"} 2024-12-09T11:03:40,150 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733742220150"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733742220150"}]},"ts":"1733742220150"} 2024-12-09T11:03:40,152 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
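
[Editor's note] The CreateTableProcedure entries above show a table being created with a single MOB-enabled column family (IS_MOB => 'true', MOB_THRESHOLD => '0'), its two regions initialized on disk and then added to hbase:meta. A hedged sketch of building an equivalent descriptor with the public client API; the split key mirrors the ',1,' region boundary in the log, but the class and variable names are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  // Creates a table with one 'cf' family where every cell is stored as a MOB
  // (threshold 0), pre-split into two regions at row key "1" as in the log.
  static void create(Connection conn, String table) throws Exception {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)
        .setMobThreshold(0L)
        .setMaxVersions(1)
        .build();
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf(table))
        .setColumnFamily(cf)
        .build();
    byte[][] splitKeys = { Bytes.toBytes("1") };
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(td, splitKeys);
    }
  }
}
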
2024-12-09T11:03:40,153 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:03:40,153 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742220153"}]},"ts":"1733742220153"} 2024-12-09T11:03:40,155 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-09T11:03:40,155 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {3469f9ca0af3=0} racks are {/default-rack=0} 2024-12-09T11:03:40,156 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T11:03:40,156 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T11:03:40,156 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T11:03:40,156 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T11:03:40,156 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T11:03:40,156 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T11:03:40,156 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T11:03:40,156 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T11:03:40,156 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T11:03:40,156 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T11:03:40,156 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5c02cf3502bb3ec9f86f1dfde13ba523, ASSIGN}, {pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=de39d8387da850a6da12447220425587, ASSIGN}] 2024-12-09T11:03:40,157 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5c02cf3502bb3ec9f86f1dfde13ba523, ASSIGN 2024-12-09T11:03:40,157 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=de39d8387da850a6da12447220425587, ASSIGN 2024-12-09T11:03:40,158 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5c02cf3502bb3ec9f86f1dfde13ba523, ASSIGN; state=OFFLINE, location=3469f9ca0af3,42349,1733741767108; forceNewPlan=false, retain=false 2024-12-09T11:03:40,158 INFO 
[PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=de39d8387da850a6da12447220425587, ASSIGN; state=OFFLINE, location=3469f9ca0af3,33293,1733741767044; forceNewPlan=false, retain=false 2024-12-09T11:03:40,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-09T11:03:40,308 INFO [3469f9ca0af3:35815 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-09T11:03:40,309 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=de39d8387da850a6da12447220425587, regionState=OPENING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:03:40,309 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=5c02cf3502bb3ec9f86f1dfde13ba523, regionState=OPENING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T11:03:40,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=de39d8387da850a6da12447220425587, ASSIGN because future has completed 2024-12-09T11:03:40,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=237, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure de39d8387da850a6da12447220425587, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T11:03:40,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5c02cf3502bb3ec9f86f1dfde13ba523, ASSIGN because future has completed 2024-12-09T11:03:40,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523, server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T11:03:40,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-09T11:03:40,465 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46367, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:03:40,467 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 
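
[Editor's note] Once the meta rows exist, the balancer picks a server for each region and TransitRegionStateProcedure/OpenRegionProcedure pairs (pid=235..238) carry out the assignment logged above and below. From a client's point of view this stage is simply "wait until the table is available"; a sketch of one way to observe it, with placeholder names:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class WaitForAssignmentSketch {
  // Polls until every region of the table has been opened somewhere, then
  // prints the region -> server mapping produced by the ASSIGN procedures.
  static void waitAndPrint(Connection conn, String table) throws Exception {
    TableName tn = TableName.valueOf(table);
    try (Admin admin = conn.getAdmin()) {
      while (!admin.isTableAvailable(tn)) {
        Thread.sleep(100);
      }
    }
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      locator.getAllRegionLocations()
          .forEach(loc -> System.out.println(
              loc.getRegion().getEncodedName() + " -> " + loc.getServerName()));
    }
  }
}
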
2024-12-09T11:03:40,467 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7752): Opening region: {ENCODED => de39d8387da850a6da12447220425587, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T11:03:40,467 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. service=AccessControlService 2024-12-09T11:03:40,467 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 2024-12-09T11:03:40,467 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7752): Opening region: {ENCODED => 5c02cf3502bb3ec9f86f1dfde13ba523, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T11:03:40,467 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T11:03:40,467 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. service=AccessControlService 2024-12-09T11:03:40,467 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp de39d8387da850a6da12447220425587 2024-12-09T11:03:40,467 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:03:40,467 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
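
[Editor's note] Each region open above registers the AccessControlService coprocessor, which is why these secure export tests grant the 'jenkins' user table permissions before writing, snapshotting, and exporting; the PBUF payloads pushed through ZKPermissionWatcher earlier in the log are the serialized form of such grants. A hedged sketch of issuing a grant through the public helper; the user name and set of actions are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
  // Grants table-level permissions to a user; the AccessController coprocessor
  // persists the entry in hbase:acl and fans it out via ZooKeeper, which is
  // what the "Updating permissions cache" lines above record.
  static void grantAll(Connection conn, String table, String user) throws Throwable {
    AccessControlClient.grant(conn, TableName.valueOf(table), user,
        null, null,   // family / qualifier: null means the whole table
        Permission.Action.READ, Permission.Action.WRITE,
        Permission.Action.EXEC, Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}
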
2024-12-09T11:03:40,468 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7794): checking encryption for de39d8387da850a6da12447220425587 2024-12-09T11:03:40,468 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,468 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7797): checking classloading for de39d8387da850a6da12447220425587 2024-12-09T11:03:40,468 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:03:40,468 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7794): checking encryption for 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,468 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7797): checking classloading for 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,469 INFO [StoreOpener-5c02cf3502bb3ec9f86f1dfde13ba523-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,469 INFO [StoreOpener-de39d8387da850a6da12447220425587-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region de39d8387da850a6da12447220425587 2024-12-09T11:03:40,470 INFO [StoreOpener-de39d8387da850a6da12447220425587-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region de39d8387da850a6da12447220425587 columnFamilyName cf 2024-12-09T11:03:40,470 INFO [StoreOpener-5c02cf3502bb3ec9f86f1dfde13ba523-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c02cf3502bb3ec9f86f1dfde13ba523 columnFamilyName cf 2024-12-09T11:03:40,471 DEBUG [StoreOpener-de39d8387da850a6da12447220425587-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:03:40,471 DEBUG [StoreOpener-5c02cf3502bb3ec9f86f1dfde13ba523-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:03:40,471 INFO [StoreOpener-5c02cf3502bb3ec9f86f1dfde13ba523-1 {}] regionserver.HStore(327): Store=5c02cf3502bb3ec9f86f1dfde13ba523/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:03:40,471 INFO [StoreOpener-de39d8387da850a6da12447220425587-1 {}] regionserver.HStore(327): Store=de39d8387da850a6da12447220425587/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:03:40,471 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1038): replaying wal for de39d8387da850a6da12447220425587 2024-12-09T11:03:40,471 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1038): replaying wal for 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,472 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587 2024-12-09T11:03:40,472 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,472 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587 2024-12-09T11:03:40,472 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,472 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1048): stopping wal replay for 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,472 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1048): stopping wal replay for 
de39d8387da850a6da12447220425587 2024-12-09T11:03:40,472 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1060): Cleaning up temporary data for 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,473 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1060): Cleaning up temporary data for de39d8387da850a6da12447220425587 2024-12-09T11:03:40,474 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1093): writing seq id for 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,474 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1093): writing seq id for de39d8387da850a6da12447220425587 2024-12-09T11:03:40,475 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:03:40,475 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:03:40,475 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1114): Opened de39d8387da850a6da12447220425587; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65544137, jitterRate=-0.02331624925136566}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:03:40,476 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1122): Running coprocessor post-open hooks for de39d8387da850a6da12447220425587 2024-12-09T11:03:40,476 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1114): Opened 5c02cf3502bb3ec9f86f1dfde13ba523; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62717277, jitterRate=-0.06543974578380585}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:03:40,476 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,476 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1006): Region open journal for de39d8387da850a6da12447220425587: Running coprocessor pre-open hook at 1733742220468Writing region info on filesystem at 1733742220468Initializing all the Stores at 1733742220468Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742220468Cleaning up temporary data from old regions at 1733742220473 (+5 ms)Running coprocessor post-open hooks at 1733742220476 (+3 ms)Region opened successfully at 1733742220476 2024-12-09T11:03:40,476 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1006): Region open journal for 5c02cf3502bb3ec9f86f1dfde13ba523: Running coprocessor pre-open hook at 1733742220468Writing region info on filesystem at 1733742220468Initializing all the Stores at 1733742220468Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733742220468Cleaning up temporary data from old regions at 1733742220472 (+4 ms)Running coprocessor post-open hooks at 1733742220476 (+4 ms)Region opened successfully at 1733742220476 2024-12-09T11:03:40,477 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587., pid=237, masterSystemTime=1733742220464 2024-12-09T11:03:40,477 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523., pid=238, masterSystemTime=1733742220464 2024-12-09T11:03:40,478 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:03:40,478 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:03:40,479 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=de39d8387da850a6da12447220425587, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:03:40,479 DEBUG [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 2024-12-09T11:03:40,479 INFO [RS_OPEN_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 
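The region-open journals above print the full column-family descriptor for 'cf' (single version, ROW bloom filter, MOB enabled with threshold 0, 64 KB blocks, no compression) and show the table coming online as two regions split at row '1'. For orientation, here is a minimal Java sketch that would create an equivalent table through the public Admin API; the class name and standalone main() wrapper are illustrative, while the descriptor settings mirror what the journal records.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSkipTmpTestTable {                        // illustrative class name
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)                 // BLOOMFILTER => 'ROW'
              .setMobEnabled(true)                               // IS_MOB => 'true'
              .setMobThreshold(0L)                               // MOB_THRESHOLD => '0'
              .setBlocksize(64 * 1024)                           // BLOCKSIZE => 64 KB
              .build();
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
              .setColumnFamily(cf)
              .build();
          // A single split key ("1") yields the two regions assigned in the entries above.
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }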
2024-12-09T11:03:40,480 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=5c02cf3502bb3ec9f86f1dfde13ba523, regionState=OPEN, openSeqNum=2, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T11:03:40,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=237, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure de39d8387da850a6da12447220425587, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T11:03:40,481 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T11:03:40,484 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=237, resume processing ppid=236 2024-12-09T11:03:40,484 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, ppid=236, state=SUCCESS, hasLock=false; OpenRegionProcedure de39d8387da850a6da12447220425587, server=3469f9ca0af3,33293,1733741767044 in 171 msec 2024-12-09T11:03:40,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=235 2024-12-09T11:03:40,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=235, state=SUCCESS, hasLock=false; OpenRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523, server=3469f9ca0af3,42349,1733741767108 in 170 msec 2024-12-09T11:03:40,485 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=de39d8387da850a6da12447220425587, ASSIGN in 328 msec 2024-12-09T11:03:40,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=234 2024-12-09T11:03:40,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5c02cf3502bb3ec9f86f1dfde13ba523, ASSIGN in 329 msec 2024-12-09T11:03:40,487 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:03:40,488 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742220487"}]},"ts":"1733742220487"} 2024-12-09T11:03:40,489 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-09T11:03:40,490 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:03:40,490 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-09T11:03:40,493 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] 
access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-09T11:03:40,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:03:40,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:03:40,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:03:40,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:03:40,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:03:40,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:03:40,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:03:40,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-09T11:03:40,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-09T11:03:40,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-09T11:03:40,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:03:40,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 
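The PermissionStorage and ZKPermissionWatcher entries above record an ACL of "jenkins: RWXCA" being written for the new table and fanned out to each region server via the /hbase/acl znode (the NodeChildrenChanged events). A hedged sketch of issuing an equivalent table-level grant from a client follows; it assumes the AccessController coprocessor is enabled, and the class name is illustrative.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTableAcl {                                  // illustrative class name
      public static void main(String[] args) throws Throwable {   // AccessControlClient.grant declares Throwable
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Table-wide grant (family and qualifier left null), matching the "jenkins: RWXCA" entry.
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }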
2024-12-09T11:03:40,499 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 376 msec 2024-12-09T11:03:40,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-09T11:03:40,753 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-09T11:03:40,753 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T11:03:40,755 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:40,755 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 2024-12-09T11:03:40,755 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:03:40,757 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T11:03:40,761 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T11:03:40,764 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53566, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:03:40,766 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T11:03:40,768 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-09T11:03:40,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742220768 (current time:1733742220768). 
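At this point the master has received a FLUSH-type snapshot request named emptySnaptb0-testExportFileSystemStateWithSkipTmp (the same pattern repeats later for snaptb0-...). For reference, a minimal client-side sketch of such a request via the Admin API; SnapshotType.FLUSH matches type=FLUSH in the request, TTL is left at its default, and the surrounding class is illustrative. The call blocks until the SnapshotProcedure the master runs below finishes.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeEmptySnapshot {                              // illustrative class name
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Synchronous FLUSH snapshot of the freshly created (still empty) table.
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
              SnapshotType.FLUSH);
        }
      }
    }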
2024-12-09T11:03:40,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:03:40,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-09T11:03:40,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:03:40,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10aad033, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:03:40,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:03:40,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:03:40,770 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:03:40,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:03:40,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:03:40,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f04ab0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:03:40,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:03:40,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:03:40,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:03:40,771 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59268, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:03:40,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f519408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:03:40,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:03:40,773 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:03:40,773 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:03:40,774 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33488, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:03:40,775 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 2024-12-09T11:03:40,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:03:40,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:03:40,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:03:40,775 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T11:03:40,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c1fae4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:03:40,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:03:40,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:03:40,776 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:03:40,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:03:40,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:03:40,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@158c1167, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:03:40,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:03:40,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:03:40,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:03:40,777 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59298, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:03:40,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@469829cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:03:40,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:03:40,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:03:40,779 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:03:40,779 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33496, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
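The AbstractRpcClient lines repeated through this section dump the client socket settings in effect (connectTO=10000, readTO=20000, writeTO=60000, tcpNoDelay=true, and so on). If those needed tuning they would come from the client Configuration; the sketch below shows one way to override them, but the property names are quoted from memory of recent HBase releases and should be verified against the version in use before relying on them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TunedClientConf {                                // illustrative class name
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property names; check them against your HBase release.
        conf.setInt("hbase.ipc.client.socket.timeout.connect", 10000);  // connectTO in the log
        conf.setInt("hbase.ipc.client.socket.timeout.read",    20000);  // readTO
        conf.setInt("hbase.ipc.client.socket.timeout.write",   60000);  // writeTO
        conf.setBoolean("hbase.ipc.client.tcpnodelay", true);           // tcpNoDelay
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Connections built from this Configuration pick up the overrides.
        }
      }
    }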
2024-12-09T11:03:40,781 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:03:40,781 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:03:40,781 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59958, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:03:40,782 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 2024-12-09T11:03:40,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:03:40,782 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:03:40,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:03:40,783 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:03:40,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-09T11:03:40,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T11:03:40,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-09T11:03:40,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-09T11:03:40,785 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:03:40,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-09T11:03:40,786 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:03:40,787 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:03:40,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742421_1597 (size=203) 2024-12-09T11:03:40,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742421_1597 (size=203) 2024-12-09T11:03:40,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742421_1597 (size=203) 2024-12-09T11:03:40,793 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:03:40,793 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523}, {pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de39d8387da850a6da12447220425587}] 2024-12-09T11:03:40,794 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,794 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de39d8387da850a6da12447220425587 2024-12-09T11:03:40,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-09T11:03:40,946 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=241 2024-12-09T11:03:40,946 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=240 2024-12-09T11:03:40,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:03:40,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 2024-12-09T11:03:40,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2603): Flush status journal for de39d8387da850a6da12447220425587: 2024-12-09T11:03:40,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-09T11:03:40,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2603): Flush status journal for 5c02cf3502bb3ec9f86f1dfde13ba523: 2024-12-09T11:03:40,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-09T11:03:40,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:40,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:03:40,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T11:03:40,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:40,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:03:40,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T11:03:40,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742422_1598 (size=82) 2024-12-09T11:03:40,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742422_1598 (size=82) 2024-12-09T11:03:40,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742422_1598 (size=82) 2024-12-09T11:03:40,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742423_1599 (size=82) 2024-12-09T11:03:40,952 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:03:40,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742423_1599 (size=82) 2024-12-09T11:03:40,953 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=241 2024-12-09T11:03:40,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742423_1599 (size=82) 2024-12-09T11:03:40,953 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 
2024-12-09T11:03:40,953 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=240 2024-12-09T11:03:40,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=241 2024-12-09T11:03:40,953 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region de39d8387da850a6da12447220425587 2024-12-09T11:03:40,953 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de39d8387da850a6da12447220425587 2024-12-09T11:03:40,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=240 2024-12-09T11:03:40,954 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,954 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:40,955 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure de39d8387da850a6da12447220425587 in 161 msec 2024-12-09T11:03:40,956 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=239 2024-12-09T11:03:40,956 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523 in 161 msec 2024-12-09T11:03:40,956 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:03:40,957 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:03:40,958 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-09T11:03:40,958 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:03:40,958 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:03:40,958 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-09T11:03:40,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742424_1600 (size=74) 2024-12-09T11:03:40,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742424_1600 (size=74) 2024-12-09T11:03:40,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742424_1600 (size=74) 2024-12-09T11:03:40,964 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:03:40,965 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:40,965 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:40,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742425_1601 (size=697) 2024-12-09T11:03:40,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742425_1601 (size=697) 2024-12-09T11:03:40,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742425_1601 (size=697) 2024-12-09T11:03:40,976 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:03:40,980 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:03:40,980 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:40,982 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:03:40,982 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-09T11:03:40,983 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 199 msec 2024-12-09T11:03:41,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-09T11:03:41,103 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-09T11:03:41,107 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42349 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:03:41,109 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33293 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T11:03:41,110 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T11:03:41,113 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:41,113 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 
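After the empty snapshot completes, the test writes rows into both regions "with WAL disabled", which the region servers flag with a data-loss warning. That behaviour corresponds to a client setting SKIP_WAL durability on its mutations; a small illustrative sketch follows, with the row key, qualifier, and value assumed rather than taken from this log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithoutWal {                                  // illustrative class name
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(
                 TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
          Put put = new Put(Bytes.toBytes("row-0"));              // assumed row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          put.setDurability(Durability.SKIP_WAL);                 // triggers the WAL-disabled warning above
          table.put(put);
        }
      }
    }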
2024-12-09T11:03:41,113 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:03:41,115 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T11:03:41,119 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T11:03:41,123 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T11:03:41,125 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-09T11:03:41,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733742221125 (current time:1733742221125). 2024-12-09T11:03:41,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T11:03:41,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-09T11:03:41,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T11:03:41,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bfa23b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:03:41,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:03:41,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:03:41,127 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:03:41,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:03:41,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:03:41,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc365fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:03:41,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:03:41,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:03:41,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:03:41,128 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59318, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:03:41,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45c27d72, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:03:41,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:03:41,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:03:41,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:03:41,130 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33502, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:03:41,131 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T11:03:41,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:03:41,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:03:41,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:03:41,131 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:03:41,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@719f47c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:03:41,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ClusterIdFetcher(90): Going to request 3469f9ca0af3,35815,-1 for getting cluster id 2024-12-09T11:03:41,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:03:41,132 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'be837b94-00f8-48cd-b1fd-571be3b11602' 2024-12-09T11:03:41,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:03:41,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "be837b94-00f8-48cd-b1fd-571be3b11602" 2024-12-09T11:03:41,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e578b99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:03:41,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [3469f9ca0af3,35815,-1] 2024-12-09T11:03:41,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:03:41,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:03:41,133 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59334, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:03:41,134 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c97e70c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:03:41,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:03:41,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3469f9ca0af3,33293,1733741767044, seqNum=-1] 2024-12-09T11:03:41,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:03:41,136 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33504, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:03:41,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., hostname=3469f9ca0af3,39691,1733741766880, seqNum=2] 2024-12-09T11:03:41,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:03:41,138 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59972, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:03:41,139 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815. 
2024-12-09T11:03:41,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:03:41,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:03:41,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:03:41,139 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:03:41,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-09T11:03:41,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
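[Editor's note] The ACL read just above (entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA]) and the "No existing snapshot, attempting snapshot..." message are the master-side view of a client snapshot request. For orientation, a minimal client-side sketch, not the test's actual code and with configuration and error handling simplified, of requesting the same FLUSH-type snapshot through the public Admin API; only the snapshot and table names are taken from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    // Connection/Admin wiring is generic; only the names come from the log above.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot: each region flushes its memstore before its HFiles are
      // referenced, which is what the "Flushing ..." entries further down show.
      admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          SnapshotType.FLUSH);
    }
  }
}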
2024-12-09T11:03:41,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-09T11:03:41,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-09T11:03:41,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-09T11:03:41,141 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T11:03:41,142 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T11:03:41,144 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T11:03:41,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742426_1602 (size=198) 2024-12-09T11:03:41,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742426_1602 (size=198) 2024-12-09T11:03:41,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742426_1602 (size=198) 2024-12-09T11:03:41,150 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T11:03:41,150 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523}, {pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de39d8387da850a6da12447220425587}] 2024-12-09T11:03:41,151 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:41,151 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de39d8387da850a6da12447220425587 2024-12-09T11:03:41,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-09T11:03:41,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42349 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-12-09T11:03:41,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-12-09T11:03:41,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 2024-12-09T11:03:41,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:03:41,303 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2902): Flushing 5c02cf3502bb3ec9f86f1dfde13ba523 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-09T11:03:41,303 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2902): Flushing de39d8387da850a6da12447220425587 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-09T11:03:41,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209221497ffc75d4931b957121a46ddb8c1_5c02cf3502bb3ec9f86f1dfde13ba523 is 71, key is 0bcda57f0fd0db3f0a3ef6885931f3aa/cf:q/1733742221107/Put/seqid=0 2024-12-09T11:03:41,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412097af525573fb2443f8b7076bdf858d0ee_de39d8387da850a6da12447220425587 is 71, key is 14cbd81b7a6f883efbae65caac850fd4/cf:q/1733742221109/Put/seqid=0 2024-12-09T11:03:41,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742427_1603 (size=5101) 2024-12-09T11:03:41,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742427_1603 (size=5101) 2024-12-09T11:03:41,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742427_1603 (size=5101) 2024-12-09T11:03:41,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
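[Editor's note] The mobdir/.tmp paths and DefaultMobStoreFlusher entries above indicate that the test table's 'cf' family is MOB-enabled, which is why each region flush writes both a MOB file and a regular store file. A minimal sketch of how such a family is declared, assuming an illustrative 100-byte MOB threshold (the test's actual settings are not visible in this log):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSketch {
  // Builds a table whose 'cf' family stores large cells as MOB files
  // (written under mobdir/ and flushed by DefaultMobStoreFlusher, as logged above).
  static void createMobTable(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(
        TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
    table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)
        .setMobThreshold(100L)   // illustrative threshold, not taken from the test
        .build());
    admin.createTable(table.build());
  }
}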
2024-12-09T11:03:41,351 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209221497ffc75d4931b957121a46ddb8c1_5c02cf3502bb3ec9f86f1dfde13ba523 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241209221497ffc75d4931b957121a46ddb8c1_5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:41,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/.tmp/cf/8e2bc7e344ca4d4eb6c8bcbd0b521bf3, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=5c02cf3502bb3ec9f86f1dfde13ba523] 2024-12-09T11:03:41,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/.tmp/cf/8e2bc7e344ca4d4eb6c8bcbd0b521bf3 is 220, key is 0d7a36d41a601d6a3ad77c7ec4e83ba73/cf:q/1733742221107/Put/seqid=0 2024-12-09T11:03:41,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742428_1604 (size=8172) 2024-12-09T11:03:41,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742428_1604 (size=8172) 2024-12-09T11:03:41,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742428_1604 (size=8172) 2024-12-09T11:03:41,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:03:41,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742429_1605 (size=5960) 2024-12-09T11:03:41,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742429_1605 (size=5960) 2024-12-09T11:03:41,363 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/.tmp/cf/8e2bc7e344ca4d4eb6c8bcbd0b521bf3 2024-12-09T11:03:41,365 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412097af525573fb2443f8b7076bdf858d0ee_de39d8387da850a6da12447220425587 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202412097af525573fb2443f8b7076bdf858d0ee_de39d8387da850a6da12447220425587 2024-12-09T11:03:41,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/.tmp/cf/00c4ed8836e04ecab9fc64ee64f4743f, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=de39d8387da850a6da12447220425587] 2024-12-09T11:03:41,366 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/.tmp/cf/00c4ed8836e04ecab9fc64ee64f4743f is 220, key is 12bb1730c7e995ab13238689a0ec8ad81/cf:q/1733742221109/Put/seqid=0 2024-12-09T11:03:41,368 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/.tmp/cf/8e2bc7e344ca4d4eb6c8bcbd0b521bf3 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/cf/8e2bc7e344ca4d4eb6c8bcbd0b521bf3 2024-12-09T11:03:41,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742430_1606 (size=15527) 2024-12-09T11:03:41,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742430_1606 (size=15527) 2024-12-09T11:03:41,371 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/.tmp/cf/00c4ed8836e04ecab9fc64ee64f4743f 2024-12-09T11:03:41,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742430_1606 (size=15527) 2024-12-09T11:03:41,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742429_1605 (size=5960) 2024-12-09T11:03:41,373 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/cf/8e2bc7e344ca4d4eb6c8bcbd0b521bf3, entries=3, sequenceid=6, filesize=5.8 K 2024-12-09T11:03:41,374 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 5c02cf3502bb3ec9f86f1dfde13ba523 in 71ms, sequenceid=6, compaction requested=false 2024-12-09T11:03:41,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-09T11:03:41,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for 5c02cf3502bb3ec9f86f1dfde13ba523: 2024-12-09T11:03:41,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-09T11:03:41,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:41,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:03:41,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/cf/8e2bc7e344ca4d4eb6c8bcbd0b521bf3] hfiles 2024-12-09T11:03:41,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/cf/8e2bc7e344ca4d4eb6c8bcbd0b521bf3 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:41,376 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/.tmp/cf/00c4ed8836e04ecab9fc64ee64f4743f as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/cf/00c4ed8836e04ecab9fc64ee64f4743f 2024-12-09T11:03:41,381 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/cf/00c4ed8836e04ecab9fc64ee64f4743f, entries=47, sequenceid=6, filesize=15.2 K 2024-12-09T11:03:41,382 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for de39d8387da850a6da12447220425587 in 79ms, sequenceid=6, compaction requested=false 2024-12-09T11:03:41,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for de39d8387da850a6da12447220425587: 2024-12-09T11:03:41,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-09T11:03:41,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:41,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T11:03:41,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/cf/00c4ed8836e04ecab9fc64ee64f4743f] hfiles 2024-12-09T11:03:41,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/cf/00c4ed8836e04ecab9fc64ee64f4743f for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:41,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742431_1607 (size=121) 2024-12-09T11:03:41,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742431_1607 (size=121) 2024-12-09T11:03:41,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742431_1607 (size=121) 2024-12-09T11:03:41,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 
2024-12-09T11:03:41,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-12-09T11:03:41,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-12-09T11:03:41,393 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:41,394 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:41,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523 in 244 msec 2024-12-09T11:03:41,403 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a0d08af07fc0beaa578cbd208923b1fb, had cached 0 bytes from a total of 5791 2024-12-09T11:03:41,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742432_1608 (size=121) 2024-12-09T11:03:41,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742432_1608 (size=121) 2024-12-09T11:03:41,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742432_1608 (size=121) 2024-12-09T11:03:41,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 
2024-12-09T11:03:41,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/3469f9ca0af3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-12-09T11:03:41,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-12-09T11:03:41,422 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region de39d8387da850a6da12447220425587 2024-12-09T11:03:41,422 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de39d8387da850a6da12447220425587 2024-12-09T11:03:41,425 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=244, resume processing ppid=242 2024-12-09T11:03:41,425 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure de39d8387da850a6da12447220425587 in 273 msec 2024-12-09T11:03:41,425 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T11:03:41,426 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T11:03:41,427 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
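[Editor's note] The repeated "Checking to see if procedure is done pid=242" entries bracketing this span are the client polling the master for snapshot completion while the procedure walks through its states. A minimal sketch of that pattern, assuming the non-blocking Admin variant and a fixed poll interval (both simplifications, not the test's code):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotPollSketch {
  // Submits the snapshot without blocking, then polls until the master reports it done.
  static void snapshotAndWait(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "snaptb0-testExportFileSystemStateWithSkipTmp",
        TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
        SnapshotType.FLUSH);
    admin.snapshotAsync(desc);                 // returns once the procedure is submitted
    while (!admin.isSnapshotFinished(desc)) {  // the master answers the "is it done" poll
      Thread.sleep(200L);                      // illustrative poll interval
    }
  }
}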
2024-12-09T11:03:41,427 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T11:03:41,427 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:03:41,428 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202412097af525573fb2443f8b7076bdf858d0ee_de39d8387da850a6da12447220425587, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241209221497ffc75d4931b957121a46ddb8c1_5c02cf3502bb3ec9f86f1dfde13ba523] hfiles 2024-12-09T11:03:41,428 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202412097af525573fb2443f8b7076bdf858d0ee_de39d8387da850a6da12447220425587 2024-12-09T11:03:41,428 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241209221497ffc75d4931b957121a46ddb8c1_5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:03:41,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742433_1609 (size=305) 2024-12-09T11:03:41,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742433_1609 (size=305) 2024-12-09T11:03:41,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742433_1609 (size=305) 2024-12-09T11:03:41,437 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T11:03:41,437 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:41,438 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:41,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742434_1610 (size=1007) 2024-12-09T11:03:41,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added 
to blk_1073742434_1610 (size=1007) 2024-12-09T11:03:41,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742434_1610 (size=1007) 2024-12-09T11:03:41,459 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T11:03:41,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-09T11:03:41,465 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T11:03:41,466 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:41,467 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T11:03:41,467 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-09T11:03:41,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 327 msec 2024-12-09T11:03:41,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-09T11:03:41,774 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-09T11:03:41,774 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742221774 2024-12-09T11:03:41,775 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35869, tgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742221774, rawTgtDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742221774, 
srcFsUri=hdfs://localhost:35869, srcDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:03:41,807 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35869, inputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6 2024-12-09T11:03:41,807 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742221774, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742221774/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:41,808 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T11:03:41,813 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742221774/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:41,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742435_1611 (size=198) 2024-12-09T11:03:41,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742435_1611 (size=198) 2024-12-09T11:03:41,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742435_1611 (size=198) 2024-12-09T11:03:41,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742436_1612 (size=1007) 2024-12-09T11:03:41,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742436_1612 (size=1007) 2024-12-09T11:03:41,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742436_1612 (size=1007) 2024-12-09T11:03:41,848 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:41,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:41,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:43,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-14435483807111410030.jar 2024-12-09T11:03:43,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:43,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:43,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop-14745858740492836582.jar 2024-12-09T11:03:43,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:43,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:43,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:43,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:43,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:43,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-09T11:03:43,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T11:03:43,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T11:03:43,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T11:03:43,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T11:03:43,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T11:03:43,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T11:03:43,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T11:03:43,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T11:03:43,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T11:03:43,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T11:03:43,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T11:03:43,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:03:43,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:03:43,189 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:03:43,189 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:03:43,189 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T11:03:43,190 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:03:43,190 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T11:03:43,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742437_1613 (size=24020) 2024-12-09T11:03:43,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742437_1613 (size=24020) 2024-12-09T11:03:43,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742437_1613 (size=24020) 2024-12-09T11:03:43,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742438_1614 (size=77755) 2024-12-09T11:03:43,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742438_1614 (size=77755) 2024-12-09T11:03:43,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742438_1614 (size=77755) 2024-12-09T11:03:43,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742439_1615 (size=131360) 2024-12-09T11:03:43,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742439_1615 (size=131360) 2024-12-09T11:03:43,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742439_1615 (size=131360) 2024-12-09T11:03:43,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742440_1616 (size=111793) 2024-12-09T11:03:43,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742440_1616 (size=111793) 2024-12-09T11:03:43,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742440_1616 (size=111793) 
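[Editor's note] The jar resolution above and the block writes around it are the staging of the ExportSnapshot MapReduce job's dependencies into HDFS. A minimal sketch of launching the same tool programmatically; the destination URI is a placeholder, and the snapshot.export.skip.tmp property name is an assumption chosen to match the skipTmp=true seen earlier, not something confirmed by this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("snapshot.export.skip.tmp", true);  // assumed property for the skip-tmp behaviour
    // ExportSnapshot is a Hadoop Tool; the flags mirror its documented CLI usage.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://namenode:8020/backup"       // placeholder destination
    });
    System.exit(rc);
  }
}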
2024-12-09T11:03:43,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742441_1617 (size=1832290) 2024-12-09T11:03:43,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742441_1617 (size=1832290) 2024-12-09T11:03:43,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742441_1617 (size=1832290) 2024-12-09T11:03:44,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742442_1618 (size=8360282) 2024-12-09T11:03:44,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742442_1618 (size=8360282) 2024-12-09T11:03:44,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742442_1618 (size=8360282) 2024-12-09T11:03:44,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742443_1619 (size=503880) 2024-12-09T11:03:44,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742443_1619 (size=503880) 2024-12-09T11:03:44,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742443_1619 (size=503880) 2024-12-09T11:03:44,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742444_1620 (size=322274) 2024-12-09T11:03:44,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742444_1620 (size=322274) 2024-12-09T11:03:44,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742444_1620 (size=322274) 2024-12-09T11:03:44,558 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0010_000001 (auth:SIMPLE) from 127.0.0.1:46162 2024-12-09T11:03:44,576 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000001/launch_container.sh] 2024-12-09T11:03:44,577 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000001/container_tokens] 2024-12-09T11:03:44,577 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_2/usercache/jenkins/appcache/application_1733741775522_0010/container_1733741775522_0010_01_000001/sysfs] 2024-12-09T11:03:44,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742445_1621 (size=20406) 2024-12-09T11:03:44,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742445_1621 (size=20406) 2024-12-09T11:03:44,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742445_1621 (size=20406) 2024-12-09T11:03:44,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742446_1622 (size=45609) 2024-12-09T11:03:44,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742446_1622 (size=45609) 2024-12-09T11:03:44,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742446_1622 (size=45609) 2024-12-09T11:03:44,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742447_1623 (size=136454) 2024-12-09T11:03:44,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742447_1623 (size=136454) 2024-12-09T11:03:44,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742447_1623 (size=136454) 2024-12-09T11:03:44,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742448_1624 (size=1597136) 2024-12-09T11:03:44,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742448_1624 (size=1597136) 2024-12-09T11:03:44,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742448_1624 (size=1597136) 2024-12-09T11:03:44,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742449_1625 (size=30873) 2024-12-09T11:03:44,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742449_1625 (size=30873) 2024-12-09T11:03:44,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742449_1625 (size=30873) 2024-12-09T11:03:44,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742450_1626 (size=29229) 2024-12-09T11:03:44,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742450_1626 (size=29229) 2024-12-09T11:03:44,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742450_1626 (size=29229) 2024-12-09T11:03:44,798 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742451_1627 (size=903861) 2024-12-09T11:03:44,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742451_1627 (size=903861) 2024-12-09T11:03:44,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742451_1627 (size=903861) 2024-12-09T11:03:44,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742452_1628 (size=5175431) 2024-12-09T11:03:44,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742452_1628 (size=5175431) 2024-12-09T11:03:44,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742452_1628 (size=5175431) 2024-12-09T11:03:44,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742453_1629 (size=232881) 2024-12-09T11:03:44,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742453_1629 (size=232881) 2024-12-09T11:03:44,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742453_1629 (size=232881) 2024-12-09T11:03:44,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742454_1630 (size=1323991) 2024-12-09T11:03:44,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742454_1630 (size=1323991) 2024-12-09T11:03:44,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742454_1630 (size=1323991) 2024-12-09T11:03:44,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742455_1631 (size=4695811) 2024-12-09T11:03:44,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742455_1631 (size=4695811) 2024-12-09T11:03:44,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742455_1631 (size=4695811) 2024-12-09T11:03:45,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742456_1632 (size=1877034) 2024-12-09T11:03:45,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742456_1632 (size=1877034) 2024-12-09T11:03:45,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742456_1632 (size=1877034) 2024-12-09T11:03:45,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742457_1633 (size=6425021) 2024-12-09T11:03:45,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742457_1633 (size=6425021) 2024-12-09T11:03:45,121 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742457_1633 (size=6425021) 2024-12-09T11:03:45,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742458_1634 (size=443171) 2024-12-09T11:03:45,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742458_1634 (size=443171) 2024-12-09T11:03:45,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742458_1634 (size=443171) 2024-12-09T11:03:45,358 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:03:45,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742459_1635 (size=217555) 2024-12-09T11:03:45,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742459_1635 (size=217555) 2024-12-09T11:03:45,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742459_1635 (size=217555) 2024-12-09T11:03:45,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742460_1636 (size=4188619) 2024-12-09T11:03:45,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742460_1636 (size=4188619) 2024-12-09T11:03:45,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742460_1636 (size=4188619) 2024-12-09T11:03:46,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742461_1637 (size=127628) 2024-12-09T11:03:46,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742461_1637 (size=127628) 2024-12-09T11:03:46,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742461_1637 (size=127628) 2024-12-09T11:03:46,391 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
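
The repeated addStoredBlock entries above record each of the three DataNodes (127.0.0.1:34611, 127.0.0.1:45147, 127.0.0.1:44813) acknowledging a replica of a newly written block, most likely the MapReduce job's staging files (jars and split metadata) being written with three replicas. As a minimal sketch of how that replication could be confirmed from the client side with the public FileSystem API; the NameNode URI matches the one used elsewhere in this log, but the file path is a placeholder, since the log only reports block IDs, not file names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder path: the log reports block IDs (blk_...), not the files they belong to.
    Path file = new Path("hdfs://localhost:35869/user/jenkins/some-job-artifact");
    FileSystem fs = file.getFileSystem(conf);
    FileStatus status = fs.getFileStatus(file);
    // Each BlockLocation lists the DataNodes holding a replica of one block,
    // mirroring the three addStoredBlock lines the NameNode emits per block above.
    for (BlockLocation block : fs.getFileBlockLocations(status, 0, status.getLen())) {
      System.out.printf("offset=%d len=%d replicas=%s%n",
          block.getOffset(), block.getLength(), String.join(",", block.getHosts()));
    }
  }
}
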
2024-12-09T11:03:46,414 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-09T11:03:46,434 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=15.2 K 2024-12-09T11:03:46,434 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-09T11:03:46,434 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-09T11:03:46,435 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-09T11:03:46,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:03:46,470 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-09T11:03:46,471 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-09T11:03:46,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742462_1638 (size=1079) 2024-12-09T11:03:46,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742462_1638 (size=1079) 2024-12-09T11:03:46,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742462_1638 (size=1079) 2024-12-09T11:03:46,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742463_1639 (size=35) 2024-12-09T11:03:46,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742463_1639 (size=35) 2024-12-09T11:03:46,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742463_1639 (size=35) 2024-12-09T11:03:46,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742464_1640 (size=304251) 2024-12-09T11:03:46,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742464_1640 (size=304251) 2024-12-09T11:03:46,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742464_1640 (size=304251) 2024-12-09T11:03:46,870 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T11:03:46,870 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T11:03:47,302 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0011_000001 (auth:SIMPLE) from 127.0.0.1:54558 2024-12-09T11:03:51,975 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:03:58,957 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0011_000001 (auth:SIMPLE) from 127.0.0.1:51252 2024-12-09T11:03:59,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742465_1641 (size=349973) 2024-12-09T11:03:59,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742465_1641 (size=349973) 2024-12-09T11:03:59,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742465_1641 (size=349973) 2024-12-09T11:04:01,564 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0011_000001 (auth:SIMPLE) from 127.0.0.1:42266 2024-12-09T11:04:01,571 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0011_000001 (auth:SIMPLE) from 127.0.0.1:34108 2024-12-09T11:04:02,154 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0011_000001 (auth:SIMPLE) from 127.0.0.1:42276 2024-12-09T11:04:02,164 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733741775522_0011_000001 (auth:SIMPLE) from 127.0.0.1:34110 2024-12-09T11:04:04,510 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
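
The ExportSnapshot entries above show the export job being prepared: the hfile list of 'snaptb0-testExportFileSystemStateWithSkipTmp' is loaded and partitioned into four splits (15.2 K, 8.0 K, 5.8 K and 5.0 K) before the YARN application is launched. A hedged sketch of driving the same tool programmatically; it assumes ExportSnapshot can be run through ToolRunner in this HBase version (the tool implements Hadoop's Tool contract), and the destination URI is a placeholder modeled on the export-test path that appears later in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Snapshot name copied from the log; the destination URI is a placeholder.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://localhost:35869/user/jenkins/export-test/export-dest",
        "-skip-tmp"   // write directly into the target, skipping the .tmp staging directory
    });
    System.exit(rc);
  }
}

The -skip-tmp flag is what the test name (testExportFileSystemStateWithSkipTmp) exercises: without it, exported files are staged in a temporary directory and only moved into place once the copy succeeds.
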
2024-12-09T11:04:04,580 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733741775522_0011_01_000006 while processing FINISH_CONTAINERS event 2024-12-09T11:04:09,154 WARN [regionserver/3469f9ca0af3:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 6, running: 0 2024-12-09T11:04:12,471 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 33657 2024-12-09T11:04:13,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742466_1642 (size=15527) 2024-12-09T11:04:13,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742466_1642 (size=15527) 2024-12-09T11:04:13,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742466_1642 (size=15527) 2024-12-09T11:04:14,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742468_1644 (size=5960) 2024-12-09T11:04:14,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742468_1644 (size=5960) 2024-12-09T11:04:14,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742468_1644 (size=5960) 2024-12-09T11:04:14,551 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000004/launch_container.sh] 2024-12-09T11:04:14,551 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000004/container_tokens] 2024-12-09T11:04:14,551 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000004/sysfs] 2024-12-09T11:04:14,602 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region de39d8387da850a6da12447220425587 changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:04:14,602 DEBUG [master/3469f9ca0af3:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5c02cf3502bb3ec9f86f1dfde13ba523 changed from -1.0 to 0.0, refreshing cache 2024-12-09T11:04:15,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742469_1645 (size=8172) 2024-12-09T11:04:15,271 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742469_1645 (size=8172) 2024-12-09T11:04:15,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742469_1645 (size=8172) 2024-12-09T11:04:15,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742470_1646 (size=5101) 2024-12-09T11:04:15,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742470_1646 (size=5101) 2024-12-09T11:04:15,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742470_1646 (size=5101) 2024-12-09T11:04:15,607 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000003/launch_container.sh] 2024-12-09T11:04:15,607 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000003/container_tokens] 2024-12-09T11:04:15,607 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_0/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000003/sysfs] 2024-12-09T11:04:15,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742467_1643 (size=31811) 2024-12-09T11:04:15,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742467_1643 (size=31811) 2024-12-09T11:04:15,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742467_1643 (size=31811) 2024-12-09T11:04:15,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742471_1647 (size=477) 2024-12-09T11:04:15,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742471_1647 (size=477) 2024-12-09T11:04:15,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742471_1647 (size=477) 2024-12-09T11:04:15,937 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000005/launch_container.sh] 2024-12-09T11:04:15,937 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000005/container_tokens] 2024-12-09T11:04:15,937 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-0_3/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000005/sysfs] 2024-12-09T11:04:16,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742472_1648 (size=31811) 2024-12-09T11:04:16,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742472_1648 (size=31811) 2024-12-09T11:04:16,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742472_1648 (size=31811) 2024-12-09T11:04:16,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742473_1649 (size=349973) 2024-12-09T11:04:16,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742473_1649 (size=349973) 2024-12-09T11:04:16,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742473_1649 (size=349973) 2024-12-09T11:04:17,714 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T11:04:17,714 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
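
Once the copy finishes, the test verifies the export by walking both the source and the destination .hbase-snapshot directories and checking that .snapshotinfo and data.manifest are present; that listing appears in the entries that follow. A small sketch of such a recursive listing with the FileSystem API, using the export root exactly as it is printed later in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListExportedSnapshot {
  public static void main(String[] args) throws Exception {
    // Export root as printed by TestExportSnapshot below.
    Path exportRoot = new Path("hdfs://localhost:35869/user/jenkins/test-data/"
        + "68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742221774/"
        + ".hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp");
    FileSystem fs = exportRoot.getFileSystem(new Configuration());
    // Recursively list every file under the exported snapshot;
    // at minimum .snapshotinfo and data.manifest should be present.
    RemoteIterator<LocatedFileStatus> files = fs.listFiles(exportRoot, true);
    while (files.hasNext()) {
      System.out.println(files.next().getPath());
    }
  }
}
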
2024-12-09T11:04:17,760 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:17,760 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T11:04:17,761 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T11:04:17,761 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:17,761 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-09T11:04:17,761 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-09T11:04:17,761 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1751713061_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742221774/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742221774/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:17,772 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742221774/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-09T11:04:17,772 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/export-test/export-1733742221774/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-09T11:04:17,799 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:17,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-09T11:04:17,822 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742257822"}]},"ts":"1733742257822"} 2024-12-09T11:04:17,860 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-09T11:04:17,860 INFO 
[PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-09T11:04:17,866 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-09T11:04:17,878 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5c02cf3502bb3ec9f86f1dfde13ba523, UNASSIGN}, {pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=de39d8387da850a6da12447220425587, UNASSIGN}] 2024-12-09T11:04:17,886 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=de39d8387da850a6da12447220425587, UNASSIGN 2024-12-09T11:04:17,889 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5c02cf3502bb3ec9f86f1dfde13ba523, UNASSIGN 2024-12-09T11:04:17,894 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=de39d8387da850a6da12447220425587, regionState=CLOSING, regionLocation=3469f9ca0af3,33293,1733741767044 2024-12-09T11:04:17,902 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=5c02cf3502bb3ec9f86f1dfde13ba523, regionState=CLOSING, regionLocation=3469f9ca0af3,42349,1733741767108 2024-12-09T11:04:17,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=de39d8387da850a6da12447220425587, UNASSIGN because future has completed 2024-12-09T11:04:17,913 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:04:17,913 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure de39d8387da850a6da12447220425587, server=3469f9ca0af3,33293,1733741767044}] 2024-12-09T11:04:17,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5c02cf3502bb3ec9f86f1dfde13ba523, UNASSIGN because future has completed 2024-12-09T11:04:17,918 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T11:04:17,919 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523, 
server=3469f9ca0af3,42349,1733741767108}] 2024-12-09T11:04:17,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-09T11:04:18,077 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(122): Close de39d8387da850a6da12447220425587 2024-12-09T11:04:18,077 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:04:18,077 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1722): Closing de39d8387da850a6da12447220425587, disabling compactions & flushes 2024-12-09T11:04:18,077 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:04:18,077 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:04:18,077 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. after waiting 0 ms 2024-12-09T11:04:18,077 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:04:18,082 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(122): Close 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:04:18,083 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T11:04:18,083 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1722): Closing 5c02cf3502bb3ec9f86f1dfde13ba523, disabling compactions & flushes 2024-12-09T11:04:18,083 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 2024-12-09T11:04:18,083 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 2024-12-09T11:04:18,083 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 
after waiting 0 ms 2024-12-09T11:04:18,083 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 2024-12-09T11:04:18,084 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:04:18,090 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:18,090 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587. 2024-12-09T11:04:18,090 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1676): Region close journal for de39d8387da850a6da12447220425587: Waiting for close lock at 1733742258077Running coprocessor pre-close hooks at 1733742258077Disabling compacts and flushes for region at 1733742258077Disabling writes for close at 1733742258077Writing region close event to WAL at 1733742258078 (+1 ms)Running coprocessor post-close hooks at 1733742258090 (+12 ms)Closed at 1733742258090 2024-12-09T11:04:18,093 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(157): Closed de39d8387da850a6da12447220425587 2024-12-09T11:04:18,094 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=de39d8387da850a6da12447220425587, regionState=CLOSED 2024-12-09T11:04:18,101 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:04:18,102 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:18,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=249, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure de39d8387da850a6da12447220425587, server=3469f9ca0af3,33293,1733741767044 because future has completed 2024-12-09T11:04:18,102 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523. 
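
The procedure activity above (DisableTableProcedure spawning CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure children, pids 245 through 250) and the DeleteTableProcedure and snapshot deletions that follow are the master-side effect of ordinary client cleanup calls at the end of the test. A sketch of that client-side cleanup with the standard Connection/Admin API; the table and snapshot names are copied from the log, everything else is boilerplate:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotTestCleanup {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // A table must be DISABLED before DeleteTableProcedure will accept it.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      admin.deleteTable(table);
      // Drop the snapshots taken during the test (see the MasterRpcServices delete entries below).
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
    }
  }
}
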
2024-12-09T11:04:18,102 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1676): Region close journal for 5c02cf3502bb3ec9f86f1dfde13ba523: Waiting for close lock at 1733742258083Running coprocessor pre-close hooks at 1733742258083Disabling compacts and flushes for region at 1733742258083Disabling writes for close at 1733742258083Writing region close event to WAL at 1733742258090 (+7 ms)Running coprocessor post-close hooks at 1733742258102 (+12 ms)Closed at 1733742258102 2024-12-09T11:04:18,107 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(157): Closed 5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:04:18,107 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=5c02cf3502bb3ec9f86f1dfde13ba523, regionState=CLOSED 2024-12-09T11:04:18,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=248 2024-12-09T11:04:18,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=248, state=SUCCESS, hasLock=false; CloseRegionProcedure de39d8387da850a6da12447220425587, server=3469f9ca0af3,33293,1733741767044 in 193 msec 2024-12-09T11:04:18,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523, server=3469f9ca0af3,42349,1733741767108 because future has completed 2024-12-09T11:04:18,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=de39d8387da850a6da12447220425587, UNASSIGN in 232 msec 2024-12-09T11:04:18,123 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=250, resume processing ppid=247 2024-12-09T11:04:18,123 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=247, state=SUCCESS, hasLock=false; CloseRegionProcedure 5c02cf3502bb3ec9f86f1dfde13ba523, server=3469f9ca0af3,42349,1733741767108 in 200 msec 2024-12-09T11:04:18,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=247, resume processing ppid=246 2024-12-09T11:04:18,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5c02cf3502bb3ec9f86f1dfde13ba523, UNASSIGN in 245 msec 2024-12-09T11:04:18,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-12-09T11:04:18,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 267 msec 2024-12-09T11:04:18,138 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733742258138"}]},"ts":"1733742258138"} 2024-12-09T11:04:18,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-09T11:04:18,146 INFO [PEWorker-5 {}] 
hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-09T11:04:18,146 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-09T11:04:18,155 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 355 msec 2024-12-09T11:04:18,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-09T11:04:18,452 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-09T11:04:18,453 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] procedure2.ProcedureExecutor(1139): Stored pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,455 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,456 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=251, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,459 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39691 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,461 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:04:18,461 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587 2024-12-09T11:04:18,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,465 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-09T11:04:18,465 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-09T11:04:18,465 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-09T11:04:18,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:04:18,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:04:18,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:04:18,471 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data null 2024-12-09T11:04:18,471 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T11:04:18,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T11:04:18,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=251 2024-12-09T11:04:18,473 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:04:18,473 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:04:18,473 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:04:18,475 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T11:04:18,477 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/recovered.edits] 2024-12-09T11:04:18,484 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/cf, FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/recovered.edits] 2024-12-09T11:04:18,523 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/cf/8e2bc7e344ca4d4eb6c8bcbd0b521bf3 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/cf/8e2bc7e344ca4d4eb6c8bcbd0b521bf3 2024-12-09T11:04:18,530 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/cf/00c4ed8836e04ecab9fc64ee64f4743f to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/cf/00c4ed8836e04ecab9fc64ee64f4743f 2024-12-09T11:04:18,562 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/recovered.edits/9.seqid to 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587/recovered.edits/9.seqid 2024-12-09T11:04:18,564 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/de39d8387da850a6da12447220425587 2024-12-09T11:04:18,565 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/recovered.edits/9.seqid to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523/recovered.edits/9.seqid 2024-12-09T11:04:18,570 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region cca3861865154a4d1c7857f88e2ede7a, had cached 0 bytes from a total of 6484 2024-12-09T11:04:18,570 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f33da0e723b1140f1a0ea77cd0d168fd, had cached 0 bytes from a total of 14067 2024-12-09T11:04:18,571 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testtb-testExportFileSystemStateWithSkipTmp/5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:04:18,573 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-09T11:04:18,578 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-09T11:04:18,585 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-12-09T11:04:18,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=251 2024-12-09T11:04:18,639 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202412097af525573fb2443f8b7076bdf858d0ee_de39d8387da850a6da12447220425587 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202412097af525573fb2443f8b7076bdf858d0ee_de39d8387da850a6da12447220425587 2024-12-09T11:04:18,656 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241209221497ffc75d4931b957121a46ddb8c1_5c02cf3502bb3ec9f86f1dfde13ba523 to hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241209221497ffc75d4931b957121a46ddb8c1_5c02cf3502bb3ec9f86f1dfde13ba523 2024-12-09T11:04:18,662 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-09T11:04:18,690 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=251, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,694 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000002/launch_container.sh] 2024-12-09T11:04:18,694 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000002/container_tokens] 2024-12-09T11:04:18,694 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_2/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000002/sysfs] 2024-12-09T11:04:18,695 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-09T11:04:18,705 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-09T11:04:18,714 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=251, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,715 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
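
The HFileArchiver entries above show that deleting the table does not erase its store files: each HFile (including the MOB files under mobdir) is moved beneath the cluster's archive root with its table/region/family layout preserved. The following is an illustrative sketch only, not HBase's HFileArchiver implementation, of that kind of move using the FileSystem API; the root directory and relative file path are placeholders modeled on the paths in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveStoreFileSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder cluster root (in this log it is .../test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6).
    Path rootDir = new Path("hdfs://localhost:35869/user/jenkins/test-data/cluster-root");
    // Relative layout mirrors data/<namespace>/<table>/<region>/<family>/<hfile>.
    String relative = "default/testtb-testExportFileSystemStateWithSkipTmp/"
        + "5c02cf3502bb3ec9f86f1dfde13ba523/cf/8e2bc7e344ca4d4eb6c8bcbd0b521bf3";
    Path source = new Path(rootDir, "data/" + relative);
    Path target = new Path(rootDir, "archive/data/" + relative);

    FileSystem fs = rootDir.getFileSystem(new Configuration());
    // Recreate the mirrored directory structure under archive/ and move the file into it,
    // which is the effect the "Archived from FileablePath ... to ..." lines record.
    fs.mkdirs(target.getParent());
    if (!fs.rename(source, target)) {
      throw new java.io.IOException("failed to archive " + source + " to " + target);
    }
  }
}
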
2024-12-09T11:04:18,715 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742258715"}]},"ts":"9223372036854775807"} 2024-12-09T11:04:18,715 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733742258715"}]},"ts":"9223372036854775807"} 2024-12-09T11:04:18,725 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T11:04:18,725 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5c02cf3502bb3ec9f86f1dfde13ba523, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733742220120.5c02cf3502bb3ec9f86f1dfde13ba523.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => de39d8387da850a6da12447220425587, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733742220120.de39d8387da850a6da12447220425587.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T11:04:18,725 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-12-09T11:04:18,725 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733742258725"}]},"ts":"9223372036854775807"} 2024-12-09T11:04:18,742 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-09T11:04:18,750 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=251, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,762 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 297 msec 2024-12-09T11:04:18,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=251 2024-12-09T11:04:18,812 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,812 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-09T11:04:18,820 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-09T11:04:18,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,823 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-09T11:04:18,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815 {}] snapshot.SnapshotManager(381): Deleting 
snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:18,855 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=823 (was 824), OpenFileDescriptor=822 (was 827), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=2655 (was 1007) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 21), AvailableMemoryMB=2099 (was 1818) - AvailableMemoryMB LEAK? - 2024-12-09T11:04:18,855 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=823 is superior to 500 2024-12-09T11:04:18,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-12-09T11:04:18,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ae57486{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-09T11:04:18,868 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f739d05{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:04:18,869 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:04:18,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fe504e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-09T11:04:18,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43d0fa3d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,STOPPED} 2024-12-09T11:04:18,900 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733741775522_0011_01_000001 is : 143 2024-12-09T11:04:18,919 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000001/launch_container.sh] 2024-12-09T11:04:18,919 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000001/container_tokens] 2024-12-09T11:04:18,919 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_187716441/yarn-3177453611/MiniMRCluster_187716441-localDir-nm-1_3/usercache/jenkins/appcache/application_1733741775522_0011/container_1733741775522_0011_01_000001/sysfs] 2024-12-09T11:04:23,610 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:04:26,403 DEBUG 
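Just above, the master handles two client requests to drop the snapshots left over from the test ("delete name: ... type: DISABLED", each followed by SnapshotManager deleting it). A minimal client-side sketch of that cleanup with the synchronous Admin API is shown below; the connection settings are assumed to come from the usual hbase-site.xml on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotCleanup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Matches the two "delete name: ... type: DISABLED" requests in the master log.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
    }
  }
}
```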
[HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a0d08af07fc0beaa578cbd208923b1fb, had cached 0 bytes from a total of 5791 2024-12-09T11:04:26,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T11:04:31,971 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:04:34,510 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:04:35,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@ef45329{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-09T11:04:35,892 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e0399ee{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:04:35,892 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:04:35,892 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4819e16b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-09T11:04:35,892 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f710d65{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,STOPPED} 2024-12-09T11:04:52,899 ERROR [Thread[Thread-402,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-09T11:04:52,900 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7148d4a4{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-09T11:04:52,901 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5320dc1e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:04:52,901 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:04:52,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b8eb224{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-09T11:04:52,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6688010e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,STOPPED} 2024-12-09T11:04:52,904 WARN [ApplicationMaster Launcher {}] 
amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-09T11:04:52,909 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-09T11:04:52,910 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-09T11:04:52,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741830_1006 (size=1172338) 2024-12-09T11:04:52,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741830_1006 (size=1172338) 2024-12-09T11:04:52,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741830_1006 (size=1172338) 2024-12-09T11:04:52,918 ERROR [Thread[Thread-425,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-09T11:04:52,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@796a1222{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-09T11:04:52,921 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56f1a1bb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:04:52,921 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:04:52,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@663ccefb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-09T11:04:52,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@666ff87d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,STOPPED} 2024-12-09T11:04:52,923 ERROR [Thread[Thread-384,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-09T11:04:52,923 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-09T11:04:52,923 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T11:04:52,923 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T11:04:52,923 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:04:52,923 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:04:52,923 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:04:52,923 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
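The call stack above shows where the shutdown is driven from: the JUnit @AfterClass hook (TestExportSnapshot.tearDownAfterClass) reaching HBaseTestingUtil.shutdownMiniCluster after the MapReduce mini cluster has been stopped. A stripped-down sketch of that lifecycle follows; the field name and the shutdownMiniMapReduceCluster call are assumptions inferred from the log, not copied from the real test class.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterLifecycle {
  // Shared test utility, in the style of the HBase snapshot export tests.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(3); // three region servers, matching RS:0..RS:2 in the log
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Mirrors the "Stopping mini mapreduce cluster" / "Shutting down minicluster" sequence above.
    TEST_UTIL.shutdownMiniMapReduceCluster(); // assumed method name; the log only shows the messages
    TEST_UTIL.shutdownMiniCluster();
  }
}
```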
2024-12-09T11:04:52,923 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T11:04:52,923 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1607255747, stopped=false 2024-12-09T11:04:52,924 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:52,924 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-09T11:04:52,924 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3469f9ca0af3,35815,1733741765917 2024-12-09T11:04:52,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:04:52,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:04:52,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:04:52,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:04:52,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:04:52,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:04:52,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:04:52,926 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:04:52,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:04:52,926 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
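The ZKWatcher lines above are the master and all three region servers observing the deletion of the /hbase/running znode, which is how cluster shutdown is propagated. The sketch below watches for that NodeDeleted event with the plain Apache ZooKeeper client rather than HBase's internal ZKWatcher; the quorum string is the one from the log, and the session timeout is an arbitrary choice.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningNodeWatcher {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    // 127.0.0.1:57831 is the quorum from the log; 30s session timeout is a placeholder.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57831", 30_000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        deleted.countDown(); // the same signal the servers react to above
      }
    });
    zk.exists("/hbase/running", true); // register a one-shot watch on the znode
    deleted.await();
    zk.close();
  }
}
```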
2024-12-09T11:04:52,926 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:04:52,926 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:04:52,926 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3469f9ca0af3,39691,1733741766880' ***** 2024-12-09T11:04:52,927 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:04:52,927 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:52,927 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:04:52,927 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:04:52,927 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3469f9ca0af3,33293,1733741767044' 
***** 2024-12-09T11:04:52,927 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:52,927 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:04:52,927 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3469f9ca0af3,42349,1733741767108' ***** 2024-12-09T11:04:52,927 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:52,927 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:04:52,927 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:04:52,927 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:04:52,927 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:04:52,927 INFO [RS:2;3469f9ca0af3:42349 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:04:52,927 INFO [RS:1;3469f9ca0af3:33293 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:04:52,928 INFO [RS:2;3469f9ca0af3:42349 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:04:52,928 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:04:52,928 INFO [RS:1;3469f9ca0af3:33293 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:04:52,928 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(959): stopping server 3469f9ca0af3,42349,1733741767108 2024-12-09T11:04:52,928 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:04:52,928 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(3091): Received CLOSE for f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:04:52,928 INFO [RS:2;3469f9ca0af3:42349 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;3469f9ca0af3:42349. 
2024-12-09T11:04:52,928 DEBUG [RS:2;3469f9ca0af3:42349 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:04:52,928 DEBUG [RS:2;3469f9ca0af3:42349 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:04:52,928 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(976): stopping server 3469f9ca0af3,42349,1733741767108; all regions closed. 2024-12-09T11:04:52,928 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(959): stopping server 3469f9ca0af3,33293,1733741767044 2024-12-09T11:04:52,928 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:04:52,928 INFO [RS:1;3469f9ca0af3:33293 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3469f9ca0af3:33293. 
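Each "Connection has been closed by ..." entry with a call stack comes from AsyncConnectionImpl.close() running during shutdown. For reference, the client-side pattern that produces that line is roughly the following; it is only a sketch, with the cluster location taken from whatever configuration is on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncConnectionClose {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection returns a CompletableFuture; get() waits for the registry lookup.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      System.out.println("connected; table and admin handles are created from conn lazily");
    } // close() here is what emits the "Connection has been closed by ..." DEBUG line seen above
  }
}
```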
2024-12-09T11:04:52,928 DEBUG [RS:1;3469f9ca0af3:33293 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:04:52,928 DEBUG [RS:1;3469f9ca0af3:33293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:04:52,928 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:04:52,928 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:04:52,929 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T11:04:52,929 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T11:04:52,929 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f33da0e723b1140f1a0ea77cd0d168fd, disabling compactions & flushes 2024-12-09T11:04:52,929 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 2024-12-09T11:04:52,929 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 2024-12-09T11:04:52,929 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. after waiting 0 ms 2024-12-09T11:04:52,929 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 
2024-12-09T11:04:52,929 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T11:04:52,929 DEBUG [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(1325): Online Regions={f33da0e723b1140f1a0ea77cd0d168fd=testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T11:04:52,929 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:04:52,929 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:04:52,929 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:04:52,929 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:04:52,929 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:04:52,929 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=44.08 KB heapSize=69.55 KB 2024-12-09T11:04:52,930 DEBUG [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, f33da0e723b1140f1a0ea77cd0d168fd 2024-12-09T11:04:52,932 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:04:52,932 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:04:52,934 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:04:52,934 INFO [RS:0;3469f9ca0af3:39691 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:04:52,934 INFO [RS:0;3469f9ca0af3:39691 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:04:52,934 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:04:52,934 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(3091): Received CLOSE for cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:04:52,934 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(3091): Received CLOSE for a0d08af07fc0beaa578cbd208923b1fb 2024-12-09T11:04:52,934 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(959): stopping server 3469f9ca0af3,39691,1733741766880 2024-12-09T11:04:52,934 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:04:52,934 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cca3861865154a4d1c7857f88e2ede7a, disabling compactions & flushes 2024-12-09T11:04:52,934 INFO [RS:0;3469f9ca0af3:39691 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3469f9ca0af3:39691. 
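The close sequence above (disable compactions and flushes, take the close lock, then flush the memstore before the region is closed) is internal to the region server. A rough client-side analogue, flushing and then disabling a table through the public Admin API, is sketched below; the table name is taken from the log, everything else is assumed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushThenDisable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testExportExpiredSnapshot"); // table from the log
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.flush(table);        // persist memstores, much like the flush the region server does on close
      admin.disableTable(table); // the regions are then closed on their hosting servers
    }
  }
}
```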
2024-12-09T11:04:52,934 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:04:52,934 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:04:52,934 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. after waiting 0 ms 2024-12-09T11:04:52,934 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:04:52,934 DEBUG [RS:0;3469f9ca0af3:39691 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:04:52,934 DEBUG [RS:0;3469f9ca0af3:39691 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:04:52,934 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T11:04:52,934 DEBUG [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(1325): Online Regions={cca3861865154a4d1c7857f88e2ede7a=testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a., a0d08af07fc0beaa578cbd208923b1fb=hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb.} 2024-12-09T11:04:52,934 DEBUG [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(1351): Waiting on a0d08af07fc0beaa578cbd208923b1fb, cca3861865154a4d1c7857f88e2ede7a 2024-12-09T11:04:52,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741835_1011 (size=9661) 2024-12-09T11:04:52,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741835_1011 (size=9661) 2024-12-09T11:04:52,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45147 is added to blk_1073741835_1011 (size=9661) 2024-12-09T11:04:52,941 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/f33da0e723b1140f1a0ea77cd0d168fd/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T11:04:52,941 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:52,941 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 2024-12-09T11:04:52,941 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f33da0e723b1140f1a0ea77cd0d168fd: Waiting for close lock at 1733742292928Running coprocessor pre-close hooks at 1733742292928Disabling compacts and flushes for region at 1733742292929 (+1 ms)Disabling writes for close at 1733742292929Writing region close event to WAL at 1733742292930 (+1 ms)Running coprocessor post-close hooks at 1733742292941 (+11 ms)Closed at 1733742292941 2024-12-09T11:04:52,941 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd. 2024-12-09T11:04:52,942 DEBUG [RS:2;3469f9ca0af3:42349 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/oldWALs 2024-12-09T11:04:52,942 INFO [RS:2;3469f9ca0af3:42349 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3469f9ca0af3%2C42349%2C1733741767108:(num 1733741769317) 2024-12-09T11:04:52,942 DEBUG [RS:2;3469f9ca0af3:42349 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:04:52,942 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:04:52,942 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:04:52,942 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.ChoreService(370): Chore service for: regionserver/3469f9ca0af3:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T11:04:52,943 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:04:52,943 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:04:52,943 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T11:04:52,943 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:04:52,943 INFO [regionserver/3469f9ca0af3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
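RS:2's WAL has just been rolled into the shared oldWALs directory and its AsyncFSWAL closed. Purely for orientation, the snippet below lists what sits under that directory with the Hadoop FileSystem API; the path is the test data root from this log and would normally be derived from hbase.rootdir rather than hard-coded.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListOldWals {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path oldWals = new Path("/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/oldWALs");
    FileSystem fs = oldWals.getFileSystem(conf);
    for (FileStatus status : fs.listStatus(oldWals)) {
      // e.g. 3469f9ca0af3%2C42349%2C1733741767108.1733741769317 after the move logged above
      System.out.println(status.getPath().getName() + " " + status.getLen());
    }
  }
}
```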
2024-12-09T11:04:52,943 INFO [RS:2;3469f9ca0af3:42349 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42349 2024-12-09T11:04:52,945 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/default/testExportExpiredSnapshot/cca3861865154a4d1c7857f88e2ede7a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T11:04:52,947 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:52,947 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:04:52,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:04:52,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3469f9ca0af3,42349,1733741767108 2024-12-09T11:04:52,947 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cca3861865154a4d1c7857f88e2ede7a: Waiting for close lock at 1733742292934Running coprocessor pre-close hooks at 1733742292934Disabling compacts and flushes for region at 1733742292934Disabling writes for close at 1733742292934Writing region close event to WAL at 1733742292938 (+4 ms)Running coprocessor post-close hooks at 1733742292947 (+9 ms)Closed at 1733742292947 2024-12-09T11:04:52,947 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:04:52,947 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733742123223.cca3861865154a4d1c7857f88e2ede7a. 2024-12-09T11:04:52,948 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a0d08af07fc0beaa578cbd208923b1fb, disabling compactions & flushes 2024-12-09T11:04:52,948 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 2024-12-09T11:04:52,948 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 2024-12-09T11:04:52,948 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. after waiting 0 ms 2024-12-09T11:04:52,948 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 
2024-12-09T11:04:52,948 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing a0d08af07fc0beaa578cbd208923b1fb 1/1 column families, dataSize=190 B heapSize=672 B 2024-12-09T11:04:52,948 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3469f9ca0af3,42349,1733741767108] 2024-12-09T11:04:52,950 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3469f9ca0af3,42349,1733741767108 already deleted, retry=false 2024-12-09T11:04:52,950 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3469f9ca0af3,42349,1733741767108 expired; onlineServers=2 2024-12-09T11:04:52,952 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb/.tmp/l/542895f8c8014dd8853a7e111061e6ee is 68, key is testtb-testExportFileSystemStateWithSkipTmp/l:/1733742258457/DeleteFamily/seqid=0 2024-12-09T11:04:52,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742474_1650 (size=5142) 2024-12-09T11:04:52,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742474_1650 (size=5142) 2024-12-09T11:04:52,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742474_1650 (size=5142) 2024-12-09T11:04:52,958 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=190 B at sequenceid=34 (bloomFilter=false), to=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb/.tmp/l/542895f8c8014dd8853a7e111061e6ee 2024-12-09T11:04:52,962 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/info/277ad21695774e069819b8ab4e87d299 is 173, key is testExportExpiredSnapshot,1,1733742123223.f33da0e723b1140f1a0ea77cd0d168fd./info:regioninfo/1733742123582/Put/seqid=0 2024-12-09T11:04:52,962 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 542895f8c8014dd8853a7e111061e6ee 2024-12-09T11:04:52,963 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb/.tmp/l/542895f8c8014dd8853a7e111061e6ee as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb/l/542895f8c8014dd8853a7e111061e6ee 2024-12-09T11:04:52,963 INFO [regionserver/3469f9ca0af3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:04:52,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to 
blk_1073742475_1651 (size=11460) 2024-12-09T11:04:52,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742475_1651 (size=11460) 2024-12-09T11:04:52,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742475_1651 (size=11460) 2024-12-09T11:04:52,971 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=37.42 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/info/277ad21695774e069819b8ab4e87d299 2024-12-09T11:04:52,975 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 542895f8c8014dd8853a7e111061e6ee 2024-12-09T11:04:52,975 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb/l/542895f8c8014dd8853a7e111061e6ee, entries=2, sequenceid=34, filesize=5.0 K 2024-12-09T11:04:52,977 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for a0d08af07fc0beaa578cbd208923b1fb in 29ms, sequenceid=34, compaction requested=false 2024-12-09T11:04:52,980 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/acl/a0d08af07fc0beaa578cbd208923b1fb/recovered.edits/37.seqid, newMaxSeqId=37, maxSeqId=1 2024-12-09T11:04:52,980 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:52,981 INFO [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 2024-12-09T11:04:52,981 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a0d08af07fc0beaa578cbd208923b1fb: Waiting for close lock at 1733742292947Running coprocessor pre-close hooks at 1733742292947Disabling compacts and flushes for region at 1733742292948 (+1 ms)Disabling writes for close at 1733742292948Obtaining lock to block concurrent updates at 1733742292948Preparing flush snapshotting stores in a0d08af07fc0beaa578cbd208923b1fb at 1733742292948Finished memstore snapshotting hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb., syncing WAL and waiting on mvcc, flushsize=dataSize=190, getHeapSize=656, getOffHeapSize=0, getCellsCount=3 at 1733742292948Flushing stores of hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 
at 1733742292949 (+1 ms)Flushing a0d08af07fc0beaa578cbd208923b1fb/l: creating writer at 1733742292949Flushing a0d08af07fc0beaa578cbd208923b1fb/l: appending metadata at 1733742292952 (+3 ms)Flushing a0d08af07fc0beaa578cbd208923b1fb/l: closing flushed file at 1733742292952Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@654e0c1e: reopening flushed file at 1733742292962 (+10 ms)Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for a0d08af07fc0beaa578cbd208923b1fb in 29ms, sequenceid=34, compaction requested=false at 1733742292977 (+15 ms)Writing region close event to WAL at 1733742292978 (+1 ms)Running coprocessor post-close hooks at 1733742292980 (+2 ms)Closed at 1733742292980 2024-12-09T11:04:52,981 DEBUG [RS_CLOSE_REGION-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733741770603.a0d08af07fc0beaa578cbd208923b1fb. 2024-12-09T11:04:52,991 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/ns/3d1ed5b0606b4273a9ea6838e27fd86d is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad./ns:/1733742120450/DeleteFamily/seqid=0 2024-12-09T11:04:52,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742476_1652 (size=6964) 2024-12-09T11:04:52,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742476_1652 (size=6964) 2024-12-09T11:04:52,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742476_1652 (size=6964) 2024-12-09T11:04:52,997 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/ns/3d1ed5b0606b4273a9ea6838e27fd86d 2024-12-09T11:04:53,015 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/rep_barrier/8a4ffecae9c74a6998183d17a9d94c1e is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad./rep_barrier:/1733742120450/DeleteFamily/seqid=0 2024-12-09T11:04:53,017 INFO [regionserver/3469f9ca0af3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:04:53,017 INFO [regionserver/3469f9ca0af3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:04:53,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742477_1653 (size=7108) 2024-12-09T11:04:53,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742477_1653 (size=7108) 2024-12-09T11:04:53,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742477_1653 (size=7108) 
2024-12-09T11:04:53,021 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.49 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/rep_barrier/8a4ffecae9c74a6998183d17a9d94c1e 2024-12-09T11:04:53,039 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/table/a0ca22ba48cb40339bf099527f93615e is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733742095498.4642d1cde05785f48336193c9593c4ad./table:/1733742120450/DeleteFamily/seqid=0 2024-12-09T11:04:53,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742478_1654 (size=7556) 2024-12-09T11:04:53,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742478_1654 (size=7556) 2024-12-09T11:04:53,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742478_1654 (size=7556) 2024-12-09T11:04:53,044 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.79 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/table/a0ca22ba48cb40339bf099527f93615e 2024-12-09T11:04:53,049 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/info/277ad21695774e069819b8ab4e87d299 as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/info/277ad21695774e069819b8ab4e87d299 2024-12-09T11:04:53,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:04:53,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42349-0x100bd63feef0003, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:04:53,051 INFO [RS:2;3469f9ca0af3:42349 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:04:53,051 INFO [RS:2;3469f9ca0af3:42349 {}] regionserver.HRegionServer(1031): Exiting; stopping=3469f9ca0af3,42349,1733741767108; zookeeper connection closed. 
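The meta flush above writes one HFile per column family (info, ns, rep_barrier, table) before each is committed from .tmp into its store. From a client's point of view the same catalog data is simply rows in hbase:meta; the hedged sketch below scans its info family with the public Table API, assuming a reachable cluster.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanMetaInfo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table meta = connection.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(new Scan().addFamily(Bytes.toBytes("info")))) {
      for (Result row : scanner) {
        // Row keys look like "<table>,<startkey>,<timestamp>.<encoded region name>."
        System.out.println(Bytes.toString(row.getRow()));
      }
    }
  }
}
```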
2024-12-09T11:04:53,051 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4be08422 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4be08422 2024-12-09T11:04:53,053 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/info/277ad21695774e069819b8ab4e87d299, entries=48, sequenceid=242, filesize=11.2 K 2024-12-09T11:04:53,054 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/ns/3d1ed5b0606b4273a9ea6838e27fd86d as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/ns/3d1ed5b0606b4273a9ea6838e27fd86d 2024-12-09T11:04:53,058 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/ns/3d1ed5b0606b4273a9ea6838e27fd86d, entries=13, sequenceid=242, filesize=6.8 K 2024-12-09T11:04:53,058 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/rep_barrier/8a4ffecae9c74a6998183d17a9d94c1e as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/rep_barrier/8a4ffecae9c74a6998183d17a9d94c1e 2024-12-09T11:04:53,062 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/rep_barrier/8a4ffecae9c74a6998183d17a9d94c1e, entries=13, sequenceid=242, filesize=6.9 K 2024-12-09T11:04:53,063 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/.tmp/table/a0ca22ba48cb40339bf099527f93615e as hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/table/a0ca22ba48cb40339bf099527f93615e 2024-12-09T11:04:53,068 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/table/a0ca22ba48cb40339bf099527f93615e, entries=22, sequenceid=242, filesize=7.4 K 2024-12-09T11:04:53,069 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~44.08 KB/45134, heapSize ~69.48 KB/71152, currentSize=0 B/0 for 1588230740 in 140ms, sequenceid=242, compaction requested=false 2024-12-09T11:04:53,073 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/data/hbase/meta/1588230740/recovered.edits/245.seqid, newMaxSeqId=245, maxSeqId=122 2024-12-09T11:04:53,073 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:53,074 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:04:53,074 INFO [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:04:53,074 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733742292929Running coprocessor pre-close hooks at 1733742292929Disabling compacts and flushes for region at 1733742292929Disabling writes for close at 1733742292929Obtaining lock to block concurrent updates at 1733742292929Preparing flush snapshotting stores in 1588230740 at 1733742292929Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=45134, getHeapSize=71152, getOffHeapSize=0, getCellsCount=332 at 1733742292930 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733742292931 (+1 ms)Flushing 1588230740/info: creating writer at 1733742292931Flushing 1588230740/info: appending metadata at 1733742292962 (+31 ms)Flushing 1588230740/info: closing flushed file at 1733742292962Flushing 1588230740/ns: creating writer at 1733742292976 (+14 ms)Flushing 1588230740/ns: appending metadata at 1733742292991 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733742292991Flushing 1588230740/rep_barrier: creating writer at 1733742293001 (+10 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733742293015 (+14 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733742293015Flushing 1588230740/table: creating writer at 1733742293024 (+9 ms)Flushing 1588230740/table: appending metadata at 1733742293039 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733742293039Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b8fa77f: reopening flushed file at 1733742293049 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ae3845f: reopening flushed file at 1733742293053 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15951a02: reopening flushed file at 1733742293058 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a129f46: reopening flushed file at 1733742293063 (+5 ms)Finished flush of dataSize ~44.08 KB/45134, heapSize ~69.48 KB/71152, currentSize=0 B/0 for 1588230740 in 140ms, sequenceid=242, compaction requested=false at 1733742293069 (+6 ms)Writing region close event to WAL at 1733742293070 (+1 ms)Running coprocessor post-close hooks at 1733742293073 (+3 ms)Closed at 1733742293074 (+1 ms) 2024-12-09T11:04:53,074 DEBUG [RS_CLOSE_META-regionserver/3469f9ca0af3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T11:04:53,096 INFO [regionserver/3469f9ca0af3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T11:04:53,096 INFO [regionserver/3469f9ca0af3:0.Chore.1 {}] 
hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T11:04:53,110 INFO [regionserver/3469f9ca0af3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T11:04:53,110 INFO [regionserver/3469f9ca0af3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T11:04:53,130 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(976): stopping server 3469f9ca0af3,33293,1733741767044; all regions closed. 2024-12-09T11:04:53,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742175_1351 (size=52030) 2024-12-09T11:04:53,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742175_1351 (size=52030) 2024-12-09T11:04:53,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742175_1351 (size=52030) 2024-12-09T11:04:53,134 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(976): stopping server 3469f9ca0af3,39691,1733741766880; all regions closed. 2024-12-09T11:04:53,134 DEBUG [RS:1;3469f9ca0af3:33293 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/oldWALs 2024-12-09T11:04:53,135 INFO [RS:1;3469f9ca0af3:33293 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3469f9ca0af3%2C33293%2C1733741767044.meta:.meta(num 1733742072374) 2024-12-09T11:04:53,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741836_1012 (size=49185) 2024-12-09T11:04:53,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741833_1009 (size=14836) 2024-12-09T11:04:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741836_1012 (size=49185) 2024-12-09T11:04:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741833_1009 (size=14836) 2024-12-09T11:04:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741833_1009 (size=14836) 2024-12-09T11:04:53,139 DEBUG [RS:0;3469f9ca0af3:39691 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/oldWALs 2024-12-09T11:04:53,139 INFO [RS:0;3469f9ca0af3:39691 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3469f9ca0af3%2C39691%2C1733741766880.meta:.meta(num 1733741769988) 2024-12-09T11:04:53,139 DEBUG [RS:1;3469f9ca0af3:33293 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/oldWALs 2024-12-09T11:04:53,139 INFO [RS:1;3469f9ca0af3:33293 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3469f9ca0af3%2C33293%2C1733741767044:(num 1733741769309) 2024-12-09T11:04:53,139 DEBUG [RS:1;3469f9ca0af3:33293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:04:53,139 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:04:53,139 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:04:53,139 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.ChoreService(370): Chore service for: 
regionserver/3469f9ca0af3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T11:04:53,139 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:04:53,139 INFO [regionserver/3469f9ca0af3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T11:04:53,140 INFO [RS:1;3469f9ca0af3:33293 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33293 2024-12-09T11:04:53,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741834_1010 (size=19078) 2024-12-09T11:04:53,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:04:53,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3469f9ca0af3,33293,1733741767044 2024-12-09T11:04:53,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073741834_1010 (size=19078) 2024-12-09T11:04:53,142 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:04:53,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073741834_1010 (size=19078) 2024-12-09T11:04:53,143 DEBUG [RS:0;3469f9ca0af3:39691 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/oldWALs 2024-12-09T11:04:53,143 INFO [RS:0;3469f9ca0af3:39691 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3469f9ca0af3%2C39691%2C1733741766880:(num 1733741769298) 2024-12-09T11:04:53,143 DEBUG [RS:0;3469f9ca0af3:39691 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:04:53,143 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:04:53,144 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:04:53,144 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3469f9ca0af3,33293,1733741767044] 2024-12-09T11:04:53,144 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.ChoreService(370): Chore service for: regionserver/3469f9ca0af3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T11:04:53,144 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:04:53,144 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:04:53,144 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
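The "Chore service for: regionserver/...:0 had [ScheduledChore name=..., period=60000, unit=MILLISECONDS, ...] on shutdown" entries list the periodic background tasks that get cancelled when a region server stops. A hedged, JDK-only sketch of the same idea using ScheduledExecutorService (not HBase's ChoreService API; the period is shortened so the demo finishes quickly):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreLikeScheduler {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();

        // Periodic task, analogous to a chore with period=60000, unit=MILLISECONDS
        // (here 500 ms so the example terminates promptly).
        chores.scheduleAtFixedRate(
                () -> System.out.println("CompactionThroughputTuner-like chore ran"),
                0, 500, TimeUnit.MILLISECONDS);

        Thread.sleep(1600);

        // On shutdown, stop accepting work and cancel whatever is still scheduled,
        // which is roughly what the "Chore service ... on shutdown" lines report.
        chores.shutdownNow();
        if (!chores.awaitTermination(5, TimeUnit.SECONDS)) {
            System.err.println("chore pool did not stop in time");
        } else {
            System.out.println("all chores stopped");
        }
    }
}
```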
2024-12-09T11:04:53,144 INFO [regionserver/3469f9ca0af3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T11:04:53,144 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:04:53,144 INFO [RS:0;3469f9ca0af3:39691 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39691 2024-12-09T11:04:53,145 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3469f9ca0af3,33293,1733741767044 already deleted, retry=false 2024-12-09T11:04:53,145 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3469f9ca0af3,33293,1733741767044 expired; onlineServers=1 2024-12-09T11:04:53,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3469f9ca0af3,39691,1733741766880 2024-12-09T11:04:53,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:04:53,146 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:04:53,147 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3469f9ca0af3,39691,1733741766880] 2024-12-09T11:04:53,149 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3469f9ca0af3,39691,1733741766880 already deleted, retry=false 2024-12-09T11:04:53,149 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3469f9ca0af3,39691,1733741766880 expired; onlineServers=0 2024-12-09T11:04:53,149 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3469f9ca0af3,35815,1733741765917' ***** 2024-12-09T11:04:53,149 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T11:04:53,149 INFO [M:0;3469f9ca0af3:35815 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:04:53,149 INFO [M:0;3469f9ca0af3:35815 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:04:53,149 DEBUG [M:0;3469f9ca0af3:35815 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T11:04:53,149 DEBUG [M:0;3469f9ca0af3:35815 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T11:04:53,149 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
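The sequence above (NodeDeleted under /hbase/rs, then "RegionServer ephemeral node deleted, processing expiration") is ZooKeeper's ephemeral-node liveness pattern: each region server registers an ephemeral znode, and when its session closes the node disappears and watchers are notified. A simplified sketch using the stock Apache ZooKeeper client API, assuming a reachable quorum (the address below is a placeholder); the path mirrors the log, but this is not HBase's RegionServerTracker:

```java
import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/** Watches the children of a "live servers" znode and reports deletions. */
public class LiveServerWatcher implements Watcher {
    private final ZooKeeper zk;
    private final String serversPath;

    LiveServerWatcher(String quorum, String serversPath) throws Exception {
        this.serversPath = serversPath;
        this.zk = new ZooKeeper(quorum, 30_000, this);
    }

    void watchServers() throws Exception {
        // Registers this watcher; it fires once on the next membership change.
        List<String> servers = zk.getChildren(serversPath, this);
        System.out.println("online servers: " + servers);
    }

    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeChildrenChanged
                || event.getType() == Event.EventType.NodeDeleted) {
            System.out.println("membership change: " + event.getType() + " " + event.getPath());
            try {
                watchServers(); // re-register, since ZooKeeper watches are one-shot
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    public static void main(String[] args) throws Exception {
        new LiveServerWatcher("127.0.0.1:2181", "/hbase/rs").watchServers();
        Thread.sleep(Long.MAX_VALUE); // keep the process alive to receive events
    }
}
```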
2024-12-09T11:04:53,149 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster-HFileCleaner.large.0-1733741768839 {}] cleaner.HFileCleaner(306): Exit Thread[master/3469f9ca0af3:0:becomeActiveMaster-HFileCleaner.large.0-1733741768839,5,FailOnTimeoutGroup] 2024-12-09T11:04:53,149 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster-HFileCleaner.small.0-1733741768854 {}] cleaner.HFileCleaner(306): Exit Thread[master/3469f9ca0af3:0:becomeActiveMaster-HFileCleaner.small.0-1733741768854,5,FailOnTimeoutGroup] 2024-12-09T11:04:53,149 INFO [M:0;3469f9ca0af3:35815 {}] hbase.ChoreService(370): Chore service for: master/3469f9ca0af3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T11:04:53,149 INFO [M:0;3469f9ca0af3:35815 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:04:53,150 DEBUG [M:0;3469f9ca0af3:35815 {}] master.HMaster(1795): Stopping service threads 2024-12-09T11:04:53,150 INFO [M:0;3469f9ca0af3:35815 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T11:04:53,150 INFO [M:0;3469f9ca0af3:35815 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:04:53,150 INFO [M:0;3469f9ca0af3:35815 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T11:04:53,150 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T11:04:53,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T11:04:53,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:04:53,151 DEBUG [M:0;3469f9ca0af3:35815 {}] zookeeper.ZKUtil(347): master:35815-0x100bd63feef0000, quorum=127.0.0.1:57831, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T11:04:53,151 WARN [M:0;3469f9ca0af3:35815 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T11:04:53,152 INFO [M:0;3469f9ca0af3:35815 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/.lastflushedseqids 2024-12-09T11:04:53,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45147 is added to blk_1073742479_1655 (size=313) 2024-12-09T11:04:53,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073742479_1655 (size=313) 2024-12-09T11:04:53,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34611 is added to blk_1073742479_1655 (size=313) 2024-12-09T11:04:53,164 INFO [M:0;3469f9ca0af3:35815 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T11:04:53,164 INFO [M:0;3469f9ca0af3:35815 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T11:04:53,164 DEBUG 
[M:0;3469f9ca0af3:35815 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:04:53,178 INFO [M:0;3469f9ca0af3:35815 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:04:53,178 DEBUG [M:0;3469f9ca0af3:35815 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:04:53,178 DEBUG [M:0;3469f9ca0af3:35815 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:04:53,178 DEBUG [M:0;3469f9ca0af3:35815 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:04:53,178 INFO [M:0;3469f9ca0af3:35815 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=996.59 KB heapSize=1.17 MB 2024-12-09T11:04:53,179 ERROR [AsyncFSWAL-0-hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData-prefix:3469f9ca0af3,35815,1733741765917 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData-prefix:3469f9ca0af3,35815,1733741765917,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:04:53,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:04:53,244 INFO [RS:1;3469f9ca0af3:33293 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:04:53,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x100bd63feef0002, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:04:53,244 INFO [RS:1;3469f9ca0af3:33293 {}] regionserver.HRegionServer(1031): Exiting; stopping=3469f9ca0af3,33293,1733741767044; zookeeper connection closed. 
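The NullPointerException above ("Cannot invoke ByteBuf.readableBytes() because this.buf is null" in FanOutOneBlockAsyncDFSOutput.buffered()) has the signature of a field being cleared by a concurrent close while another thread still dereferences it. Purely as an illustration of that class of race, and not the actual HBase code or fix, a sketch that reads such a field through one local copy:

```java
import java.nio.ByteBuffer;

/**
 * Generic sketch: a field that a concurrent close() sets to null must be
 * read through a single local copy (or otherwise guarded) before use,
 * instead of being dereferenced directly.
 */
public class ClosableBuffer {
    private volatile ByteBuffer buf = ByteBuffer.allocate(1024);

    /** Returns bytes currently buffered, or 0 if the buffer was already closed. */
    public int buffered() {
        ByteBuffer local = buf;           // read the volatile field exactly once
        return local == null ? 0 : local.remaining();
    }

    public void close() {
        buf = null;                       // a concurrent close releases the buffer
    }

    public static void main(String[] args) {
        ClosableBuffer cb = new ClosableBuffer();
        System.out.println("buffered before close: " + cb.buffered());
        cb.close();
        System.out.println("buffered after close:  " + cb.buffered());
    }
}
```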
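The "Process Thread Dump: Automatic Stack Trace every 60 seconds" blocks further below are emitted while the test waits for the master to exit; each entry reports a thread's state, blocked/waited counts, and stack. A JDK-only sketch that collects the same kind of information via the standard java.lang.management API (not HBase's Threads.printThreadInfo helper):

```java
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class MiniThreadDump {
    public static void main(String[] args) {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        // lockedMonitors/lockedSynchronizers disabled; full stacks are still returned
        for (ThreadInfo ti : mx.dumpAllThreads(false, false)) {
            System.out.printf("Thread %d (%s): State: %s Blocked count: %d Waited count: %d%n",
                    ti.getThreadId(), ti.getThreadName(), ti.getThreadState(),
                    ti.getBlockedCount(), ti.getWaitedCount());
            for (StackTraceElement frame : ti.getStackTrace()) {
                System.out.println("  " + frame);
            }
        }
    }
}
```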
2024-12-09T11:04:53,244 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3640a2ee {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3640a2ee 2024-12-09T11:04:53,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:04:53,248 INFO [RS:0;3469f9ca0af3:39691 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:04:53,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39691-0x100bd63feef0001, quorum=127.0.0.1:57831, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:04:53,248 INFO [RS:0;3469f9ca0af3:39691 {}] regionserver.HRegionServer(1031): Exiting; stopping=3469f9ca0af3,39691,1733741766880; zookeeper connection closed. 2024-12-09T11:04:53,248 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@766935b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@766935b 2024-12-09T11:04:53,249 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T11:04:55,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44813 is added to blk_1073741836_1012 (size=49185) 2024-12-09T11:04:56,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:56,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:04:56,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T11:04:56,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-09T11:04:56,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-09T11:04:56,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:56,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-09T11:04:56,470 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T11:04:58,457 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:05:04,510 DEBUG [FsDatasetAsyncDiskServiceFixer {}] 
hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:05:34,510 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;3469f9ca0af3:35815 237 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 43 Waited count: 22 Waiting on java.lang.ref.ReferenceQueue$Lock@103771d3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 26 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cc6be6c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5935 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 60 Waiting on java.util.concurrent.CountDownLatch$Sync@46075209 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) 
java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11951 Waited count: 12826 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 17 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@729ea902 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@3b624ce3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@56c8d05a): State: TIMED_WAITING Blocked count: 0 Waited count: 1183 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 119 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1519861075-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1519861075-38): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1519861075-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1519861075-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1519861075-41-acceptor-0@173365be-ServerConnector@6b557a24{HTTP/1.1, (http/1.1)}{localhost:36709}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1519861075-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1519861075-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1519861075-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b2558db-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 41 Waited count: 3556 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b503da Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 35869): State: TIMED_WAITING Blocked count: 1 Waited count: 61 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 119 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27449207): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 198 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@24d0128): State: TIMED_WAITING Blocked count: 0 Waited count: 119 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING 
Blocked count: 0 Waited count: 198 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 57010 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1701 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c4aee56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 35869): State: TIMED_WAITING Blocked count: 51 Waited count: 2767 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 35869): State: TIMED_WAITING Blocked count: 
72 Waited count: 2775 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 35869): State: TIMED_WAITING Blocked count: 47 Waited count: 2792 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 35869): State: TIMED_WAITING Blocked count: 57 Waited count: 2787 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 35869): State: TIMED_WAITING Blocked count: 62 Waited count: 2771 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2b8db2f8): 
State: TIMED_WAITING Blocked count: 0 Waited count: 296 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@765a473): State: TIMED_WAITING Blocked count: 0 Waited count: 119 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@96ef7ce): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@66f60370): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(304209986)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 79 (process reaper): State: TIMED_WAITING Blocked count: 16 Waited count: 1824 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1544511259-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1544511259-88-acceptor-0@15d5b20b-ServerConnector@68913343{HTTP/1.1, (http/1.1)}{localhost:46253}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1544511259-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1544511259-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-5f9885ae-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74cdb702): State: TIMED_WAITING Blocked count: 0 Waited count: 1179 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 44147): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 119 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 2 Waited count: 323 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4efdf487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 1537 Waited count: 1757 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1e8d2466): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 590 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 591 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 591 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 590 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 590 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 112 (IPC Client (1359318497) connection to localhost/127.0.0.1:35869 from jenkins): State: TIMED_WAITING Blocked count: 1733 Waited count: 1734 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 113 (IPC Parameter Sending Thread for localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 0 Waited count: 2286 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp162707258-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp162707258-122-acceptor-0@5e1c866e-ServerConnector@952b611{HTTP/1.1, (http/1.1)}{localhost:41107}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp162707258-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp162707258-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 
(Session-HouseKeeper-70e868c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@21f186f3): State: TIMED_WAITING Blocked count: 0 Waited count: 1178 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 34213): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (Command processor): State: WAITING Blocked count: 1 Waited count: 308 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3829410f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 138 (BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 1513 Waited count: 1747 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 139 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@20c9f17d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 605 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 592 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 605 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 591 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 591 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 150 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 151 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 158 (pool-39-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp12009514-160): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (qtp12009514-163-acceptor-0@61f3d98c-ServerConnector@8ce124a{HTTP/1.1, (http/1.1)}{localhost:35723}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 164 (qtp12009514-164): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (qtp12009514-165): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 166 (Session-HouseKeeper-2d3d98d3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 168 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 178 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 180 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 184 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (java.util.concurrent.ThreadPoolExecutor$Worker@62bf0be0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@4633d828[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3fc8bea0): State: TIMED_WAITING Blocked count: 0 Waited count: 1178 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 37743): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 1 Waited count: 381 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e55ffab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 1511 Waited count: 1741 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@51335b59): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 201 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 198 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 208 (IPC Server handler 0 on default port 37743):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 589
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 209 (IPC Server handler 1 on default port 37743):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 589
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 210 (IPC Server handler 2 on default port 37743):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 589
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 211 (IPC Server handler 3 on default port 37743):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 589
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 212 (IPC Server handler 4 on default port 37743):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 591
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data5)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data6)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data6/current/BP-1191631881-172.17.0.2-1733741760972):
  State: TIMED_WAITING
  Blocked count: 2
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data5/current/BP-1191631881-172.17.0.2-1733741760972):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 226 (pool-36-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@6a59f49e[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 234 (FsDatasetAsyncDiskServiceFixer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 20
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576)
Thread 237 (NIOServerCxnFactory.SelectorThread-1):
  State: RUNNABLE
  Blocked count: 4
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 236 (NIOServerCxnFactory.SelectorThread-0):
  State: RUNNABLE
  Blocked count: 8
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57831):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181)
Thread 235 (ConnnectionExpirer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 60
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554)
Thread 239 (SessionTracker):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 295
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Thread 240 (SyncThread:0):
  State: WAITING
  Blocked count: 8
  Waited count: 432
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fbc4e2e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170)
Thread 241 (ProcessThread(sid:0 cport:57831):):
  State: WAITING
  Blocked count: 2
  Waited count: 528
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43f8aa7b
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142)
Thread 242 (RequestThrottler):
  State: WAITING
  Blocked count: 1
  Waited count: 572
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@628dc60
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147)
Thread 243 (NIOWorkerThread-1):
  State: WAITING
  Blocked count: 1
  Waited count: 103
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 245 (LeaseRenewer:jenkins@localhost:35869):
  State: TIMED_WAITING
  Blocked count: 17
  Waited count: 614
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 254 (weak-ref-cleaner-strictcontextstorage):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.lang.ref.ReferenceQueue$Lock@40d5c7
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 255 (HBase-Metrics2-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 485
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 256 (HMaster-EventLoopGroup-1-1):
  State: RUNNABLE
  Blocked count: 87
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 257 (Time-limited test-SendThread(127.0.0.1:57831)):
  State: RUNNABLE
  Blocked count: 25
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 258 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 1
  Waited count: 67
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46e61ba7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 259 (NIOWorkerThread-2):
  State: WAITING
  Blocked count: 2
  Waited count: 103
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 260 (NIOWorkerThread-3):
  State: WAITING
  Blocked count: 0
  Waited count: 103
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 261 (NIOWorkerThread-4):
  State: WAITING
  Blocked count: 0
  Waited count: 103
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 262 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 20
  Waited count: 84
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a710333
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 263 (NIOWorkerThread-5):
  State: WAITING
  Blocked count: 4
  Waited count: 105
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 264 (NIOWorkerThread-6):
  State: WAITING
  Blocked count: 3
  Waited count: 103
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 265 (NIOWorkerThread-7):
  State: WAITING
  Blocked count: 3
  Waited count: 102
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 266 (NIOWorkerThread-8):
  State: WAITING
  Blocked count: 1
  Waited count: 103
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 267 (NIOWorkerThread-9):
  State: WAITING
  Blocked count: 2
  Waited count: 102
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 268 (NIOWorkerThread-10):
  State: WAITING
  Blocked count: 6
  Waited count: 103
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 269 (NIOWorkerThread-11):
  State: WAITING
  Blocked count: 2
  Waited count: 102
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 270 (NIOWorkerThread-12):
  State: WAITING
  Blocked count: 3
  Waited count: 103
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 271 (NIOWorkerThread-13):
  State: WAITING
  Blocked count: 2
  Waited count: 103
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 272 (NIOWorkerThread-14):
  State: WAITING
  Blocked count: 2
  Waited count: 102
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 273 (NIOWorkerThread-15):
  State: WAITING
  Blocked count: 2
  Waited count: 102
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 274 (NIOWorkerThread-16):
  State: WAITING
  Blocked count: 1
  Waited count: 103
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35815):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@6c191d57
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815):
  State: WAITING
  Blocked count: 182
  Waited count: 704
  Waiting on java.util.concurrent.Semaphore$NonfairSync@2fc373e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815):
  State: WAITING
  Blocked count: 61
  Waited count: 365
  Waiting on java.util.concurrent.Semaphore$NonfairSync@74b13fe5
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815):
  State: WAITING
  Blocked count: 85
  Waited count: 13632
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16bbd2db
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35815):
  State: WAITING
  Blocked count: 1
  Waited count: 4
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32ec96fe
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35815):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32ec96fe
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35815):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@51ca125a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35815):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@2cf94191
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35815):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@649c63df
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35815):
  State: WAITING
  Blocked count: 0
  Waited count: 7
  Waiting on java.util.concurrent.Semaphore$NonfairSync@329152d4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 288 (Time-limited test.named-queue-events-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33c8ce1e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 329 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1):
  State: RUNNABLE
  Blocked count: 46
  Waited count: 3
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 286 (M:0;3469f9ca0af3:35815):
  State: TIMED_WAITING
  Blocked count: 12
  Waited count: 5247
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1101/0x00007f6d60f95ba0.run(Unknown Source)
    app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713)
    app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700)
    app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610)
Thread 350 (Monitor thread for TaskMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 59
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 352 (master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 354 (master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 356 (org.apache.hadoop.hdfs.PeerCache@782734c3):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 196
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 375 (master:store-WAL-Roller):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 5827
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180)
Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2):
  State: RUNNABLE
  Blocked count: 70
  Waited count: 3
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3):
  State: RUNNABLE
  Blocked count: 45
  Waited count: 3
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 165
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 417 (SnapshotHandlerChoreCleaner):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 59
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 405 (RpcClient-timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 58333
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 426 (HMaster-EventLoopGroup-1-2):
  State: RUNNABLE
  Blocked count: 97
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 427 (HMaster-EventLoopGroup-1-3):
  State: RUNNABLE
  Blocked count: 114
  Waited count: 2
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 452 (RegionServerTracker-0):
  State: WAITING
  Blocked count: 10
  Waited count: 15
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3019739b
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 476 (regionserver/3469f9ca0af3:0.procedureResultReporter):
  State: WAITING
  Blocked count: 19
  Waited count: 39
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@52dfb1c5
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 473 (regionserver/3469f9ca0af3:0.procedureResultReporter):
  State: WAITING
  Blocked count: 11
  Waited count: 23
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29168254
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 472 (regionserver/3469f9ca0af3:0.procedureResultReporter):
  State: WAITING
  Blocked count: 17
  Waited count: 35
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b08a52f
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 500 (LeaseRenewer:jenkins.hfs.0@localhost:35869):
  State: TIMED_WAITING
  Blocked count: 17
  Waited count: 613
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 502 (LeaseRenewer:jenkins.hfs.1@localhost:35869):
  State: TIMED_WAITING
  Blocked count: 17
  Waited count: 614
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 520 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 8
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (region-location-0): State: WAITING Blocked count: 14 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 58114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 554 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 701 Waiting on java.util.concurrent.ForkJoinPool@f085c2c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 579 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 580 (region-location-2): State: WAITING Blocked count: 4 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 980 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1191 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1044 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1074 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cae951b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1241 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1656 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@18cf9180 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1822 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 920 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2015 (region-location-3): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (region-location-4): State: WAITING Blocked count: 5 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2719 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 482 Waiting on java.util.concurrent.ForkJoinPool@f085c2c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6810 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6811 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9177 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 131 Waiting on java.util.concurrent.ForkJoinPool@f085c2c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11573 (AsyncFSWAL-1-hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData-prefix:3469f9ca0af3,35815,1733741765917): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@55eb4789 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11576 (java.util.concurrent.ThreadPoolExecutor$Worker@5c01fc62[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11578 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-09T11:06:04,511 DEBUG [FsDatasetAsyncDiskServiceFixer {}] 
hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:06:07,243 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=29, reuseRatio=74.36% 2024-12-09T11:06:07,243 DEBUG [master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-09T11:06:15,603 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T11:06:34,511 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;3469f9ca0af3:35815 232 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 43 Waited count: 22 Waiting on java.lang.ref.ReferenceQueue$Lock@103771d3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 28 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cc6be6c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 37 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6534 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 66 Waiting on java.util.concurrent.CountDownLatch$Sync@50e7b474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11951 Waited count: 12827 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) 
app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 17 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@729ea902 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@3b624ce3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@56c8d05a): State: TIMED_WAITING Blocked count: 0 Waited count: 1303 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1519861075-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1519861075-38): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1519861075-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1519861075-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1519861075-41-acceptor-0@173365be-ServerConnector@6b557a24{HTTP/1.1, (http/1.1)}{localhost:36709}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1519861075-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1519861075-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1519861075-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b2558db-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 41 Waited count: 3556 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b503da Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 35869): State: TIMED_WAITING Blocked count: 1 Waited count: 67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27449207): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 218 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@24d0128): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 218 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 62969 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1701 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c4aee56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 35869): State: TIMED_WAITING Blocked count: 51 Waited count: 2828 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 35869): State: TIMED_WAITING Blocked count: 72 Waited count: 2836 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 35869): State: TIMED_WAITING Blocked count: 47 Waited count: 2853 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 35869): State: TIMED_WAITING Blocked count: 57 Waited count: 2848 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 35869): State: TIMED_WAITING Blocked count: 62 Waited count: 2832 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): 
State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2b8db2f8): State: TIMED_WAITING Blocked count: 0 Waited count: 326 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@765a473): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@96ef7ce): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@66f60370): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(304209986)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1544511259-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1544511259-88-acceptor-0@15d5b20b-ServerConnector@68913343{HTTP/1.1, (http/1.1)}{localhost:46253}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1544511259-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1544511259-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-5f9885ae-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74cdb702): State: TIMED_WAITING Blocked count: 0 Waited count: 1299 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 44147): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 2 Waited count: 343 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4efdf487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 1557 Waited count: 1797 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1e8d2466): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 651 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 651 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 112 (IPC Client (1359318497) connection to localhost/127.0.0.1:35869 from jenkins): State: TIMED_WAITING Blocked count: 1793 Waited count: 1794 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 113 (IPC Parameter Sending Thread for localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 0 Waited count: 2346 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp162707258-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp162707258-122-acceptor-0@5e1c866e-ServerConnector@952b611{HTTP/1.1, (http/1.1)}{localhost:41107}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp162707258-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp162707258-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-70e868c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@21f186f3): State: TIMED_WAITING Blocked count: 0 Waited count: 1298 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 34213): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (Command processor): State: WAITING Blocked count: 1 Waited count: 328 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3829410f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 138 (BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 1533 Waited count: 1787 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 139 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@20c9f17d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 665 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 652 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 665 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 651 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 651 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 150 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 151 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 158 (pool-39-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp12009514-160): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (qtp12009514-163-acceptor-0@61f3d98c-ServerConnector@8ce124a{HTTP/1.1, (http/1.1)}{localhost:35723}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 164 (qtp12009514-164): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (qtp12009514-165): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 166 (Session-HouseKeeper-2d3d98d3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 168 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 178 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 180 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68d63c1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 184 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (java.util.concurrent.ThreadPoolExecutor$Worker@62bf0be0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27a40c73 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@4633d828[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3fc8bea0): State: TIMED_WAITING Blocked count: 0 Waited count: 1298 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) 
app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 37743): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 1 Waited count: 401 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e55ffab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 1531 Waited count: 1781 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@51335b59): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 198 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (IPC Server handler 0 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 649 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 209 (IPC Server handler 1 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 649 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 2 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 649 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 3 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 649 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 4 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 651 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data6/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data5/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-36-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d89039f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@6a59f49e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57831): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 325 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 8 Waited count: 437 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fbc4e2e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:57831):): State: WAITING Blocked count: 2 Waited count: 533 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43f8aa7b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 577 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@628dc60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@40d5c7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 516 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 87 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:57831)): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 67 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46e61ba7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a710333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 105 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 103 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 
(NIOWorkerThread-10): State: WAITING Blocked count: 6 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6c191d57 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815): State: WAITING Blocked count: 182 Waited count: 704 Waiting on java.util.concurrent.Semaphore$NonfairSync@2fc373e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815): State: WAITING Blocked count: 61 Waited count: 365 Waiting on java.util.concurrent.Semaphore$NonfairSync@74b13fe5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815): State: WAITING Blocked count: 85 Waited count: 13632 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16bbd2db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35815): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32ec96fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32ec96fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@51ca125a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2cf94191 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@649c63df Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@329152d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING 
Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33c8ce1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 329 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 46 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;3469f9ca0af3:35815): State: TIMED_WAITING Blocked count: 12 Waited count: 5247 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1101/0x00007f6d60f95ba0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 350 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (org.apache.hadoop.hdfs.PeerCache@782734c3): State: TIMED_WAITING Blocked count: 0 Waited count: 216 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 375 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6427 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 70 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 45 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 171 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1fac93d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 64335 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 97 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 114 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 452 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3019739b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 (regionserver/3469f9ca0af3:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@52dfb1c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/3469f9ca0af3:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29168254 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 472 (regionserver/3469f9ca0af3:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b08a52f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (region-location-0): State: WAITING Blocked count: 14 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 64116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 554 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 701 Waiting on java.util.concurrent.ForkJoinPool@f085c2c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 579 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 580 (region-location-2): State: WAITING Blocked count: 4 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 980 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1197 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1044 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1074 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cae951b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 
(RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1241 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1656 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@18cf9180 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2015 (region-location-3): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (region-location-4): State: WAITING Blocked count: 5 Waited 
count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2719 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 482 Waiting on java.util.concurrent.ForkJoinPool@f085c2c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6810 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6811 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9177 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 132 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11573 (AsyncFSWAL-1-hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData-prefix:3469f9ca0af3,35815,1733741765917): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@55eb4789 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11582 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11583 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
2024-12-09T11:07:04,511 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:07:34,511 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;3469f9ca0af3:35815 231 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 43 Waited count: 22 Waiting on java.lang.ref.ReferenceQueue$Lock@103771d3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 29 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING 
Blocked count: 0 Waited count: 32 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cc6be6c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 72 Waiting on java.util.concurrent.CountDownLatch$Sync@62ad10e7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11951 Waited count: 12828 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 17 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@729ea902 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@3b624ce3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@56c8d05a): State: TIMED_WAITING Blocked count: 0 Waited count: 1423 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 143 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1519861075-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1519861075-38): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1519861075-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1519861075-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1519861075-41-acceptor-0@173365be-ServerConnector@6b557a24{HTTP/1.1, (http/1.1)}{localhost:36709}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1519861075-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1519861075-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1519861075-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b2558db-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 41 Waited count: 3556 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b503da Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 35869): State: TIMED_WAITING Blocked count: 1 Waited count: 73 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 143 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27449207): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 238 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@24d0128): State: TIMED_WAITING Blocked count: 0 Waited count: 143 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING 
Blocked count: 0 Waited count: 238 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 68931 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1701 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c4aee56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 35869): State: TIMED_WAITING Blocked count: 51 Waited count: 2889 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 35869): State: TIMED_WAITING Blocked count: 
72 Waited count: 2897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 35869): State: TIMED_WAITING Blocked count: 47 Waited count: 2913 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 35869): State: TIMED_WAITING Blocked count: 57 Waited count: 2909 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 35869): State: TIMED_WAITING Blocked count: 62 Waited count: 2893 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2b8db2f8): 
State: TIMED_WAITING Blocked count: 0 Waited count: 356 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@765a473): State: TIMED_WAITING Blocked count: 0 Waited count: 143 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@96ef7ce): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@66f60370): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(304209986)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1544511259-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1544511259-88-acceptor-0@15d5b20b-ServerConnector@68913343{HTTP/1.1, (http/1.1)}{localhost:46253}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1544511259-89): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1544511259-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-5f9885ae-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74cdb702): State: TIMED_WAITING Blocked count: 0 Waited count: 1419 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 44147): State: TIMED_WAITING Blocked count: 1 Waited count: 73 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 143 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 2 Waited count: 363 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4efdf487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 1577 Waited count: 1837 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1e8d2466): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 710 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 711 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 711 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 710 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 710 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 112 (IPC Client (1359318497) connection to localhost/127.0.0.1:35869 from jenkins): State: TIMED_WAITING Blocked count: 1853 Waited count: 1854 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 113 (IPC Parameter Sending Thread for localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 0 Waited count: 2406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp162707258-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp162707258-122-acceptor-0@5e1c866e-ServerConnector@952b611{HTTP/1.1, (http/1.1)}{localhost:41107}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp162707258-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp162707258-124): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-70e868c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@21f186f3): State: TIMED_WAITING Blocked count: 0 Waited count: 1418 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 34213): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (Command processor): State: WAITING Blocked count: 1 Waited count: 348 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3829410f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 138 (BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 1553 Waited count: 1827 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 139 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@20c9f17d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 726 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 712 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 726 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 711 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 711 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 150 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 151 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 158 (pool-39-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp12009514-160): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (qtp12009514-163-acceptor-0@61f3d98c-ServerConnector@8ce124a{HTTP/1.1, (http/1.1)}{localhost:35723}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 164 (qtp12009514-164): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (qtp12009514-165): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 166 (Session-HouseKeeper-2d3d98d3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 168 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 178 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 180 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68d63c1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 184 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING 
Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (java.util.concurrent.ThreadPoolExecutor$Worker@62bf0be0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27a40c73 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@4633d828[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3fc8bea0): State: TIMED_WAITING Blocked count: 0 Waited count: 1418 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 37743): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 1 Waited count: 421 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e55ffab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 1551 Waited count: 1821 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@51335b59): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 198 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (IPC Server handler 0 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 209 (IPC Server handler 1 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 2 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 3 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 4 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 711 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data6/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data5/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-36-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d89039f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@6a59f49e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57831): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 72 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 355 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 8 Waited count: 441 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fbc4e2e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:57831):): State: WAITING Blocked count: 2 Waited count: 537 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43f8aa7b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 581 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@628dc60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@40d5c7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 544 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 87 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:57831)): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 67 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46e61ba7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a710333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 106 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 6 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 103 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 
(RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6c191d57 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815): State: WAITING Blocked count: 182 Waited count: 704 Waiting on java.util.concurrent.Semaphore$NonfairSync@2fc373e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815): State: WAITING Blocked count: 61 Waited count: 365 Waiting on java.util.concurrent.Semaphore$NonfairSync@74b13fe5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815): State: WAITING Blocked count: 85 Waited count: 13632 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16bbd2db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 
(RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35815): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32ec96fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32ec96fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@51ca125a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2cf94191 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@649c63df Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@329152d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33c8ce1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 329 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 46 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;3469f9ca0af3:35815): State: TIMED_WAITING Blocked count: 12 Waited count: 5247 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1101/0x00007f6d60f95ba0.run(Unknown Source) 
app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 350 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 71 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (org.apache.hadoop.hdfs.PeerCache@782734c3): State: TIMED_WAITING Blocked count: 0 Waited count: 236 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 375 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 7027 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 70 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 45 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 171 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1fac93d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 70336 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 97 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 114 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 452 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3019739b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 (regionserver/3469f9ca0af3:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@52dfb1c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/3469f9ca0af3:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29168254 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 472 (regionserver/3469f9ca0af3:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b08a52f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (region-location-0): State: WAITING Blocked count: 14 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 70118 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 554 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 701 Waiting on java.util.concurrent.ForkJoinPool@f085c2c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 579 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 580 (region-location-2): State: WAITING Blocked count: 4 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 980 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1203 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1044 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1074 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cae951b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1241 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1656 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@18cf9180 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2015 (region-location-3): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (region-location-4): State: WAITING Blocked count: 5 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2719 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 483 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6810 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6811 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11573 (AsyncFSWAL-1-hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData-prefix:3469f9ca0af3,35815,1733741765917): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@55eb4789 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11582 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11583 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:08:04,512 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-12-09T11:08:34,512 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
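The two DEBUG records above are emitted when a test helper that reaches into a Hadoop-internal class by reflection finds that the expected threadGroup field no longer exists on newer Hadoop releases (see HBASE-27595) and simply keeps going. A minimal sketch of that failure mode follows; the class and field names are illustrative only and this is not the actual HBase or Hadoop code.

import java.lang.reflect.Field;

public final class ReflectiveFieldProbe {
    // Reads a private field by name, returning null when the field is absent,
    // i.e. the situation logged above as "NoSuchFieldException: threadGroup".
    static Object readPrivateField(Object target, String fieldName) {
        try {
            Field f = target.getClass().getDeclaredField(fieldName); // throws NoSuchFieldException if missing
            f.setAccessible(true);
            return f.get(target);
        } catch (NoSuchFieldException e) {
            // Degrade gracefully, as the fixer thread does: log once and carry on.
            System.out.println("NoSuchFieldException: " + fieldName
                + "; the dependency may have removed or renamed this field in a newer version.");
            return null;
        } catch (IllegalAccessException e) {
            throw new IllegalStateException(e);
        }
    }
}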
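The "Process Thread Dump: Automatic Stack Trace every 60 seconds" records that surround this point are produced while the harness waits for the master (M:0;3469f9ca0af3:35815) to shut down; the "Time-limited test" thread's own frames in the dump below show it looping through ThreadMXBean (sun.management.ThreadImpl.getThreadInfo) via ReflectionUtils.printThreadInfo. A minimal sketch of how such a periodic dump can be generated with the standard java.lang.management API, as an illustrative approximation rather than the HBase utility itself:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public final class PeriodicThreadDump {
    public static void main(String[] args) throws InterruptedException {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        while (true) {
            // One pass over all live threads, mirroring the fields shown in the dumps above.
            for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
                System.out.printf("Thread %d (%s): State: %s Blocked count: %d Waited count: %d%n",
                    info.getThreadId(), info.getThreadName(), info.getThreadState(),
                    info.getBlockedCount(), info.getWaitedCount());
                System.out.println(" Stack:");
                for (StackTraceElement frame : info.getStackTrace()) {
                    System.out.println("   " + frame);
                }
            }
            Thread.sleep(60_000L); // repeat every 60 seconds, as the dump header states
        }
    }
}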
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;3469f9ca0af3:35815 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 43 Waited count: 22 Waiting on java.lang.ref.ReferenceQueue$Lock@103771d3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 30 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cc6be6c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 43 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7734 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 78 Waiting on java.util.concurrent.CountDownLatch$Sync@555d53c7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11951 Waited count: 12829 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 17 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@729ea902 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@3b624ce3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@56c8d05a): State: TIMED_WAITING Blocked count: 0 Waited count: 1543 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 155 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1519861075-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1519861075-38): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1519861075-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1519861075-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1519861075-41-acceptor-0@173365be-ServerConnector@6b557a24{HTTP/1.1, (http/1.1)}{localhost:36709}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1519861075-42): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1519861075-43): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1519861075-44): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b2558db-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 41 Waited count: 3556 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b503da Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 35869): State: TIMED_WAITING Blocked count: 1 Waited 
count: 79 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 155 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27449207): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 258 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@24d0128): State: TIMED_WAITING Blocked count: 0 Waited count: 155 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 258 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 74893 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1701 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c4aee56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 35869): State: TIMED_WAITING Blocked count: 51 Waited count: 2950 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 35869): State: TIMED_WAITING Blocked count: 72 Waited count: 2958 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 35869): State: TIMED_WAITING Blocked count: 47 Waited count: 2974 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 35869): State: TIMED_WAITING Blocked count: 57 Waited count: 2969 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 35869): State: TIMED_WAITING Blocked count: 62 Waited count: 2954 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2b8db2f8): State: TIMED_WAITING Blocked count: 0 Waited count: 386 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@765a473): State: TIMED_WAITING Blocked count: 0 Waited count: 155 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@96ef7ce): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@66f60370): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(304209986)): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1544511259-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1544511259-88-acceptor-0@15d5b20b-ServerConnector@68913343{HTTP/1.1, (http/1.1)}{localhost:46253}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1544511259-89): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1544511259-90): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-5f9885ae-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74cdb702): State: TIMED_WAITING Blocked count: 0 Waited count: 1539 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 44147): State: TIMED_WAITING Blocked count: 1 Waited count: 79 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 155 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 2 Waited count: 383 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4efdf487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 1597 Waited count: 1877 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1e8d2466): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 770 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 771 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 771 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 770 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44147): State: TIMED_WAITING Blocked count: 0 Waited count: 770 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 112 (IPC Client (1359318497) connection to localhost/127.0.0.1:35869 from jenkins): State: TIMED_WAITING Blocked count: 1913 Waited count: 1914 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 113 (IPC Parameter Sending Thread for localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 0 Waited count: 2466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp162707258-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp162707258-122-acceptor-0@5e1c866e-ServerConnector@952b611{HTTP/1.1, (http/1.1)}{localhost:41107}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp162707258-123): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp162707258-124): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-70e868c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@21f186f3): State: TIMED_WAITING Blocked count: 0 Waited count: 1538 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 34213): State: TIMED_WAITING Blocked count: 1 Waited count: 78 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (Command processor): State: WAITING Blocked count: 1 Waited count: 368 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3829410f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 138 (BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 1573 Waited count: 1867 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 139 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@20c9f17d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 786 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 772 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 786 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 771 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 34213): State: TIMED_WAITING Blocked count: 0 Waited count: 771 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 150 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 151 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 158 (pool-39-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp12009514-160): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (qtp12009514-163-acceptor-0@61f3d98c-ServerConnector@8ce124a{HTTP/1.1, (http/1.1)}{localhost:35723}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 164 (qtp12009514-164): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (qtp12009514-165): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 166 (Session-HouseKeeper-2d3d98d3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 168 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 178 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 180 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68d63c1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 184 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (java.util.concurrent.ThreadPoolExecutor$Worker@62bf0be0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27a40c73 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@4633d828[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3fc8bea0): State: TIMED_WAITING Blocked count: 0 Waited count: 1538 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 37743): State: TIMED_WAITING Blocked count: 1 Waited count: 78 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 1 Waited count: 441 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e55ffab Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869): State: TIMED_WAITING Blocked count: 1571 Waited count: 1861 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@51335b59): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 198 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (IPC Server handler 0 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 769 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 209 (IPC Server handler 1 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 769 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 2 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 769 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 3 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 769 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 4 on default port 37743): State: TIMED_WAITING Blocked count: 0 Waited count: 771 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data6/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data5/current/BP-1191631881-172.17.0.2-1733741760972): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-36-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d89039f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@6a59f49e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57831): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 78 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 385 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 8 Waited count: 445 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fbc4e2e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:57831):): State: WAITING Blocked count: 2 Waited count: 541 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43f8aa7b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 585 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@628dc60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 105 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@40d5c7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 87 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:57831)): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 67 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46e61ba7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 105 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a710333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 106 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 105 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 6 Waited count: 105 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 105 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 104 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2da03363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6c191d57 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815): State: WAITING Blocked count: 182 Waited count: 704 Waiting on java.util.concurrent.Semaphore$NonfairSync@2fc373e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815): State: WAITING Blocked count: 61 Waited count: 365 Waiting on java.util.concurrent.Semaphore$NonfairSync@74b13fe5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815): State: WAITING Blocked count: 85 Waited count: 13632 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16bbd2db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35815): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32ec96fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32ec96fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@51ca125a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2cf94191 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@649c63df Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35815): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@329152d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33c8ce1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 329 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 46 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;3469f9ca0af3:35815): State: TIMED_WAITING Blocked count: 12 Waited count: 5247 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1101/0x00007f6d60f95ba0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 350 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (org.apache.hadoop.hdfs.PeerCache@782734c3): State: TIMED_WAITING Blocked count: 0 Waited count: 256 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 375 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 
Waited count: 7626 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 70 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 45 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 171 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1fac93d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 76337 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 97 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 114 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 452 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3019739b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 (regionserver/3469f9ca0af3:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@52dfb1c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/3469f9ca0af3:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29168254 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 472 (regionserver/3469f9ca0af3:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b08a52f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (region-location-0): State: WAITING Blocked count: 14 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 76119 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 554 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 702 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 579 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 580 (region-location-2): State: WAITING Blocked count: 4 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 980 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1209 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1044 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1074 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cae951b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1241 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1296 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1297 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1656 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@18cf9180 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2015 (region-location-3): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2016 (region-location-4): State: WAITING Blocked count: 5 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2076807e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6810 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6811 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11573 (AsyncFSWAL-1-hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData-prefix:3469f9ca0af3,35815,1733741765917): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@55eb4789 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11582 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-09T11:08:59,342 DEBUG [Time-limited test {}] hbase.LocalHBaseCluster(398): Interrupted java.lang.InterruptedException: null at java.lang.Object.wait(Native Method) ~[?:?] at java.lang.Thread.join(Thread.java:1307) ~[?:?] 
at org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:111) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
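For context, the InterruptedException above is raised on the normal teardown path: the test class shuts the mini cluster down from a JUnit 4 @AfterClass hook, and the surefire test timeout interrupts the join on the cluster threads inside LocalHBaseCluster.join(). Below is a minimal sketch of that lifecycle, assuming the usual HBaseTestingUtil startMiniCluster()/shutdownMiniCluster() pairing; the class and test names are placeholders for illustration, not the actual TestExportSnapshot code.

// Minimal sketch (placeholder names, not TestExportSnapshot itself): a JUnit 4 test
// that starts and stops a mini HBase cluster the same way the stack trace above does.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterTeardownSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(); // brings up in-process HDFS, ZooKeeper and HBase
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // The call visible in the trace: shutdownMiniCluster() -> shutdownMiniHBaseCluster()
    // -> SingleProcessHBaseCluster.waitUntilShutDown(), which joins the cluster threads
    // and is what gets interrupted when the surefire timeout fires.
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void noop() {
    // test bodies omitted; only the lifecycle hooks matter for this sketch
  }
}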
2024-12-09T11:08:59,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64abaf74{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:08:59,346 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@8ce124a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:08:59,346 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:08:59,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e7a55e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:08:59,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4656ace7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,STOPPED} 2024-12-09T11:08:59,350 WARN [BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:08:59,350 WARN [BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1191631881-172.17.0.2-1733741760972 (Datanode Uuid 11ec4edf-37c8-4ed3-b355-230e28c31c72) service to localhost/127.0.0.1:35869 2024-12-09T11:08:59,350 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T11:08:59,351 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data5/current/BP-1191631881-172.17.0.2-1733741760972 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:08:59,351 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:08:59,351 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data6/current/BP-1191631881-172.17.0.2-1733741760972 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:08:59,351 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func ====> TEST TIMED OUT. PRINTING THREAD DUMP. 
<==== Timestamp: 2024-12-09 11:08:59,346 "Common-Cleaner" daemon prio=8 tid=12 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) at java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) "Socket Reader #1 for port 0" daemon prio=5 tid=95 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "NIOWorkerThread-14" daemon prio=5 tid=272 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@765a473" daemon prio=5 tid=72 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RedundancyMonitor" daemon prio=5 tid=47 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) at java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2/current/BP-1191631881-172.17.0.2-1733741760972" daemon prio=5 tid=167 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-6" daemon prio=5 tid=1094 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-15-thread-1" daemon prio=5 tid=180 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "HBase-Metrics2-1" daemon prio=5 tid=255 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-4-1" daemon prio=10 tid=309 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-09T11:08:59,354 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@51d3e980{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
"surefire-forkedjvm-stream-flusher" daemon prio=5 tid=16 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@24d0128" daemon prio=5 tid=49 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2)" daemon prio=5 tid=151 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "IPC Server handler 4 on default port 34213" daemon prio=5 tid=144 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "FsDatasetAsyncDiskServiceFixer" daemon prio=5 tid=234 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) "NIOWorkerThread-6" daemon prio=5 tid=264 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "zk-permission-watcher-pool-0" daemon prio=5 tid=1088 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-1-thread-2" daemon prio=5 tid=15 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) at java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1519861075-38" daemon prio=5 tid=38 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-09T11:08:59,355 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@952b611{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:08:59,355 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
"RpcClient-timer-pool-0" daemon prio=5 tid=405 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server listener on 0" daemon prio=5 tid=54 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35815" daemon prio=5 tid=277 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "Async-Client-Retry-Timer-pool-0" daemon prio=5 tid=404 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MutableQuantiles-0" daemon prio=5 tid=980 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1519861075-37" daemon prio=5 tid=37 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics" daemon prio=5 tid=354 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "master/3469f9ca0af3:0:becomeActiveMaster-MemStoreChunkPool Statistics" daemon prio=5 tid=352 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-09T11:08:59,355 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28a4d72{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T11:08:59,355 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@770eb78{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,STOPPED}
"Session-HouseKeeper-70e868c0-1" prio=5 tid=125 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "FSEditLogAsync" daemon prio=5 tid=53 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) at app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) at app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "ProcessThread(sid:0 cport:57831):" daemon prio=5 tid=241 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) "nioEventLoopGroup-6-3" prio=10 tid=11588 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "main" prio=5 tid=1 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/java.lang.Thread.dumpThreads(Native Method) at java.base@17.0.11/java.lang.Thread.getAllStackTraces(Thread.java:1671) at app//org.apache.hadoop.hbase.TimedOutTestsListener.buildThreadDump(TimedOutTestsListener.java:92) at app//org.apache.hadoop.hbase.TimedOutTestsListener.buildThreadDiagnosticString(TimedOutTestsListener.java:78) at app//org.apache.hadoop.hbase.TimedOutTestsListener.testFailure(TimedOutTestsListener.java:65) at 
app//org.junit.runner.notification.SynchronizedRunListener.testFailure(SynchronizedRunListener.java:94) at app//org.junit.runner.notification.RunNotifier$6.notifyListener(RunNotifier.java:177) at app//org.junit.runner.notification.RunNotifier$SafeNotifier.run(RunNotifier.java:72) at app//org.junit.runner.notification.RunNotifier.fireTestFailures(RunNotifier.java:173) at app//org.junit.runner.notification.RunNotifier.fireTestFailure(RunNotifier.java:167) at app//org.apache.maven.surefire.common.junit4.Notifier.fireTestFailure(Notifier.java:100) at app//org.junit.internal.runners.model.EachTestNotifier.addFailure(EachTestNotifier.java:23) at app//org.junit.internal.runners.model.EachTestNotifier.addMultipleFailureException(EachTestNotifier.java:29) at app//org.junit.internal.runners.model.EachTestNotifier.addFailure(EachTestNotifier.java:21) at app//org.junit.runners.ParentRunner.run(ParentRunner.java:419) at app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) at app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) at app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) at app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) at app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) at app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) at app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) at app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) "ForkJoinPool.commonPool-worker-2" daemon prio=5 tid=554 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) at java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) "HMaster-EventLoopGroup-1-1" daemon prio=10 tid=256 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-7-thread-1" prio=5 tid=46 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1e8d2466" daemon prio=5 tid=85 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) at app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1544511259-87" daemon prio=5 tid=87 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-3" daemon prio=5 tid=260 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MarkedDeleteBlockScrubberThread" daemon prio=5 tid=48 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "SessionTracker" daemon prio=5 tid=239 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) "org.apache.hadoop.hdfs.PeerCache@782734c3" daemon prio=5 tid=356 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) at app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) at app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-11" daemon prio=5 tid=1293 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-3" daemon prio=5 tid=2015 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-09T11:08:59,356 WARN [BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T11:08:59,356 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T11:08:59,356 WARN [BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1191631881-172.17.0.2-1733741760972 (Datanode Uuid bf1ce84d-1b3c-4418-b6e9-82203d5f965c) service to localhost/127.0.0.1:35869
2024-12-09T11:08:59,356 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
"IPC Server handler 3 on default port 37743" daemon prio=5 tid=211 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "CacheReplicationMonitor(304209986)" daemon prio=5 tid=75 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) at app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@56c8d05a" daemon prio=5 tid=34 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-12" daemon prio=5 tid=270 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-5" daemon prio=5 tid=1092 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35815" daemon prio=5 tid=282 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35815" daemon prio=5 tid=284 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "java.util.concurrent.ThreadPoolExecutor$Worker@62bf0be0[State = -1, empty queue]" 
daemon prio=5 tid=187 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-10" daemon prio=5 tid=1292 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-20-thread-1" prio=5 tid=103 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 4 on default port 37743" daemon prio=5 tid=212 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "IPC Server handler 3 on default port 34213" daemon prio=5 tid=143 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "NIOWorkerThread-13" daemon prio=5 tid=271 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "regionserver/3469f9ca0af3:0.procedureResultReporter" daemon prio=5 tid=472 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) "NIOWorkerThread-9" daemon prio=5 tid=267 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Reference Handler" daemon prio=10 tid=2 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) at java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) at java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data5/current/BP-1191631881-172.17.0.2-1733741760972" daemon prio=5 tid=222 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "master:store-WAL-Roller" daemon prio=5 tid=375 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) "IPC Server idle connection scanner for port 37743" daemon prio=5 tid=200 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "NIOWorkerThread-15" daemon prio=5 tid=273 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
2024-12-09T11:08:59,357 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3/current/BP-1191631881-172.17.0.2-1733741760972 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-3-1" daemon prio=10 tid=289 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 3 on default port 35869" daemon prio=5 tid=67 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "IPC Server handler 1 on default port 35869" daemon prio=5 tid=65 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "NIOWorkerThread-16" daemon prio=5 tid=274 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-15" daemon prio=5 tid=6810 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-5-3" daemon prio=10 tid=393 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-7-3" prio=10 tid=11591 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@96ef7ce" daemon prio=5 tid=73 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2b8db2f8" daemon prio=5 tid=71 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
2024-12-09T11:08:59,358 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4/current/BP-1191631881-172.17.0.2-1733741760972 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T11:08:59,358 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
at app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Command processor" daemon prio=5 tid=205 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@66f60370" daemon prio=5 tid=74 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server listener on 0" daemon prio=5 tid=94 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "regionserver/3469f9ca0af3:0.procedureResultReporter" daemon prio=5 tid=476 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) "nioEventLoopGroup-6-2" prio=10 tid=11587 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "M:0;3469f9ca0af3:35815" daemon prio=5 tid=286 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) at app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1101/0x00007f6d60f95ba0.run(Unknown Source) at app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) at app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) at app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) at app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) at app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) at app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) at app//org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) at app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) at app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) at app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35815" daemon prio=5 tid=283 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "region-location-0" daemon prio=5 tid=521 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1544511259-88-acceptor-0@15d5b20b-ServerConnector@68913343{HTTP/1.1, (http/1.1)}{localhost:46253}" daemon prio=3 tid=88 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-4" daemon prio=5 tid=2016 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 1 on default port 34213" daemon prio=5 tid=141 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "nioEventLoopGroup-6-1" prio=10 tid=193 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "HMaster-EventLoopGroup-1-3" daemon prio=10 tid=427 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35815" daemon prio=5 tid=285 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "RPCClient-NioEventLoopGroup-6-7" daemon prio=5 tid=1239 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 0 on default port 37743" daemon prio=5 tid=208 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "pool-6-thread-1" prio=5 tid=36 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server idle connection scanner for port 34213" daemon prio=5 tid=131 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@21f186f3" daemon prio=5 tid=128 timed_waiting java.lang.Thread.State: TIMED_WAITING at 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-7-2" prio=10 tid=11590 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server Responder" daemon prio=5 tid=97 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "NIOWorkerThread-2" daemon prio=5 tid=259 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=133 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1/current/BP-1191631881-172.17.0.2-1733741760972" daemon prio=5 tid=168 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 4 on default port 35869" daemon prio=5 tid=68 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "MiniHBaseClusterRegionServer-EventLoopGroup-4-2" daemon prio=10 tid=1074 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-4-1" prio=10 tid=127 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869" daemon prio=5 tid=138 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at 
app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-8" daemon prio=5 tid=1240 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-1" daemon prio=5 tid=243 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-2-1" prio=10 tid=92 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 0 on default port 35869" daemon prio=5 tid=64 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "RPCClient-NioEventLoopGroup-6-3" daemon prio=5 tid=528 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@3fc8bea0" daemon prio=5 tid=197 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35815" daemon prio=5 tid=278 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "SSL Certificates Store Monitor" daemon prio=5 tid=25 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "MiniHBaseClusterRegionServer-EventLoopGroup-4-3" daemon prio=10 tid=1093 
runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-1-thread-1" daemon prio=5 tid=14 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) at java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server listener on 0" daemon prio=5 tid=129 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data6/current/BP-1191631881-172.17.0.2-1733741760972" daemon prio=5 tid=221 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-12-thread-1" prio=5 tid=69 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-16" daemon prio=5 tid=6811 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Block report processor" daemon prio=5 tid=51 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) "qtp1519861075-40" daemon prio=5 tid=40 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-1-hdfs://localhost:35869/user/jenkins/test-data/68b9b708-4645-1b74-583e-132cd6fd8ce6/MasterData-prefix:3469f9ca0af3,35815,1733741765917" daemon prio=5 tid=11573 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "SyncThread:0" daemon prio=5 tid=240 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) "qtp1519861075-43" daemon prio=5 tid=43 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Parameter Sending Thread for localhost/127.0.0.1:35869" daemon prio=5 tid=113 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) at java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) at app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100" daemon prio=5 tid=35 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) "qtp1544511259-90" daemon prio=5 tid=90 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=58 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server idle connection scanner for port 35869" daemon prio=5 tid=56 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "Command processor" daemon prio=5 tid=137 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) "NIOWorkerThread-7" daemon prio=5 tid=265 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-2" daemon prio=5 tid=580 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 4 on default port 44147" daemon prio=5 tid=108 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) 
"MiniHBaseClusterRegionServer-EventLoopGroup-3-3" daemon prio=10 tid=520 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp162707258-123" daemon prio=5 tid=123 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "HMaster-EventLoopGroup-1-2" daemon prio=10 tid=426 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Monitor thread for TaskMonitor" daemon prio=5 tid=350 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-1" daemon prio=5 tid=579 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-26-thread-1" prio=5 tid=120 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Finalizer" daemon prio=8 tid=3 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) "MiniHBaseClusterRegionServer-EventLoopGroup-5-2" daemon prio=10 tid=392 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-12" daemon prio=5 tid=1294 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Time-limited test-SendThread(127.0.0.1:57831)" daemon prio=5 tid=257 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) at app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) "pool-29-thread-1" prio=5 tid=139 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=202 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869" daemon prio=5 tid=206 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1)" daemon prio=5 tid=150 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "MiniHBaseClusterRegionServer-EventLoopGroup-5-1" daemon prio=10 tid=329 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Time-limited test-EventThread" daemon prio=5 tid=258 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) "NIOWorkerThread-5" daemon prio=5 tid=263 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Socket Reader #1 for port 0" daemon prio=5 tid=199 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "java.util.concurrent.ThreadPoolExecutor$Worker@4633d828[State = -1, empty queue]" daemon prio=5 tid=196 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server Responder" daemon prio=5 tid=201 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "zk-event-processor-pool-0" daemon prio=5 tid=262 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-14" daemon prio=5 tid=1297 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "SnapshotHandlerChoreCleaner" daemon prio=5 tid=417 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RegionServerTracker-0" daemon prio=5 tid=452 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-4" daemon prio=5 tid=1044 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOServerCxnFactory.SelectorThread-0" daemon prio=5 tid=236 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) "RPCClient-NioEventLoopGroup-6-2" daemon prio=5 tid=526 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Time-limited test" daemon prio=5 tid=22 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/java.lang.Thread.interrupt0(Native Method) at 
java.base@17.0.11/java.lang.Thread.interrupt(Thread.java:1011) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.interruptIfStarted(ThreadPoolExecutor.java:670) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.interruptWorkers(ThreadPoolExecutor.java:769) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.shutdownNow(ThreadPoolExecutor.java:1417) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor.shutdownNow(ScheduledThreadPoolExecutor.java:870) at java.base@17.0.11/java.util.concurrent.Executors$DelegatedExecutorService.shutdownNow(Executors.java:727) at app//org.apache.hadoop.hdfs.server.datanode.checker.ThrottledAsyncChecker.shutdownAndWait(ThrottledAsyncChecker.java:199) at app//org.apache.hadoop.hdfs.server.datanode.checker.DatasetVolumeChecker.shutdownAndWait(DatasetVolumeChecker.java:431) at app//org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:2579) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNode(MiniDFSCluster.java:2232) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:2222) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2201) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) at app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) at app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) at app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) at app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at app//org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at app//org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Session-HouseKeeper-b2558db-1" prio=5 tid=45 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-9" daemon prio=5 tid=1241 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35815" daemon prio=5 tid=281 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "Container metrics unregistration" daemon prio=5 tid=1656 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3)" daemon prio=5 tid=176 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "IPC Server listener on 0" daemon prio=5 tid=198 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "IPC Server handler 2 on default port 37743" daemon prio=5 tid=210 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4/current/BP-1191631881-172.17.0.2-1733741760972" daemon prio=5 tid=188 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-4" daemon prio=5 tid=261 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server idle connection scanner for port 44147" daemon prio=5 tid=96 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "IPC Server handler 0 on default port 34213" daemon prio=5 tid=140 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@20c9f17d" daemon prio=5 tid=119 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) at app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35815" daemon prio=5 tid=276 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "nioEventLoopGroup-7-1" prio=10 tid=11589 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 3 on default port 44147" daemon prio=5 tid=107 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "IPC Server handler 1 on default port 44147" daemon prio=5 tid=105 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "RPCClient-NioEventLoopGroup-6-13" daemon prio=5 tid=1296 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp162707258-124" daemon prio=5 tid=124 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "ConnnectionExpirer" daemon prio=5 tid=235 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) "Notification Thread" daemon prio=9 tid=13 runnable java.lang.Thread.State: RUNNABLE "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data3/current/BP-1191631881-172.17.0.2-1733741760972" daemon prio=5 tid=184 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Command processor" daemon prio=5 tid=101 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) "qtp1519861075-42" daemon prio=5 tid=42 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner" daemon prio=5 tid=23 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Session-HouseKeeper-5f9885ae-1" prio=5 tid=91 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27449207" daemon prio=5 tid=61 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1519861075-41-acceptor-0@173365be-ServerConnector@6b557a24{HTTP/1.1, (http/1.1)}{localhost:36709}" daemon prio=3 tid=41 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=98 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 2 on default port 35869" daemon prio=5 tid=66 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "pool-36-thread-1" daemon prio=5 tid=226 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
"org.apache.hadoop.util.JvmPauseMonitor$Monitor@74cdb702" daemon prio=5 tid=93 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1519861075-44" daemon prio=5 tid=44 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-18-thread-1" prio=5 tid=86 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)2024-12-09T11:08:59,364 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@17559536{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:08:59,365 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68913343{HTTP/1.1, (http/1.1)}{localhost:0} at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 1 on default port 37743" daemon prio=5 tid=209 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "NIOWorkerThread-8" daemon prio=5 tid=266 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RequestThrottler" daemon prio=5 tid=242 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) "IPC Server Responder" daemon prio=5 tid=57 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "qtp162707258-122-acceptor-0@5e1c866e-ServerConnector@952b611{HTTP/1.1, (http/1.1)}{localhost:41107}" daemon prio=3 tid=122 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35815" daemon prio=5 tid=280 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "qtp1544511259-89" daemon prio=5 tid=89 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "regionserver/3469f9ca0af3:0.procedureResultReporter" daemon prio=5 tid=473 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) "Socket Reader #1 for port 0" daemon prio=5 tid=55 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
2024-12-09T11:08:59,365 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T11:08:59,365 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c0b65f0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T11:08:59,365 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65dbe8a3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/hadoop.log.dir/,STOPPED}
"Idle-Rpc-Conn-Sweeper-pool-0" daemon prio=5 tid=406 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869" daemon prio=5 tid=102 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 2 on default port 44147" daemon prio=5 tid=106 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "NIOWorkerThread-11" daemon prio=5 tid=269 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "weak-ref-cleaner-strictcontextstorage" daemon prio=1 tid=254 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1519861075-39" daemon prio=5 tid=39 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 0 on default port 44147" daemon prio=5 tid=104 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "NIOServerCxnFactory.SelectorThread-1" daemon prio=5 tid=237 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) "Time-limited test.named-queue-events-pool-0" daemon prio=5 tid=288 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) at app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) at app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) at app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server Responder" daemon prio=5 tid=132 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "MiniHBaseClusterRegionServer-EventLoopGroup-3-2" daemon prio=10 tid=511 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Signal Dispatcher" daemon prio=9 tid=4 runnable java.lang.Thread.State: RUNNABLE "NIOWorkerThread-10" daemon prio=5 tid=268 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Timer for 'JobHistoryServer' metrics system" daemon prio=5 tid=11582 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "Socket Reader #1 for port 0" daemon prio=5 tid=130 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data4)" daemon prio=5 tid=178 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35815" daemon prio=5 tid=279 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "IPC Server handler 2 on default port 34213" daemon prio=5 tid=142 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "qtp162707258-121" daemon prio=5 tid=121 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f6d6042d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-09T11:08:59,366 WARN [BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
"IPC Client (1359318497) connection to localhost/127.0.0.1:35869 from jenkins" daemon prio=5 tid=112 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) at app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) "DatanodeAdminMonitor-0" daemon prio=5 tid=62 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "surefire-forkedjvm-command-thread" daemon prio=5 tid=18 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) at java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) at app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) at app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) at app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) at app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) at app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) at app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-23-thread-1" daemon prio=5 tid=192 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-1" daemon prio=5 tid=525 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57831" daemon prio=5 tid=238 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) 2024-12-09T11:08:59,366 WARN [BP-1191631881-172.17.0.2-1733741760972 heartbeating to localhost/127.0.0.1:35869 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1191631881-172.17.0.2-1733741760972 (Datanode Uuid f40cb86e-bff3-43c1-9868-6a09b88b8d36) service to localhost/127.0.0.1:35869 2024-12-09T11:08:59,366 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T11:08:59,366 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:08:59,367 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data1/current/BP-1191631881-172.17.0.2-1733741760972 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:08:59,367 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/8877ed0c-d9ab-97c1-36d9-3c325c109961/cluster_f5fba41a-0ffc-e349-5239-33b0ca6638d0/data/data2/current/BP-1191631881-172.17.0.2-1733741760972 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:08:59,367 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func