2024-12-01 17:05:28,766 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477
2024-12-01 17:05:28,802 main DEBUG Took 0.022554 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-01 17:05:28,802 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-01 17:05:28,803 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-01 17:05:28,804 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-01 17:05:28,806 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,828 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-01 17:05:28,874 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,880 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,881 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,882 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,886 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,887 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,892 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,893 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,895 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,896 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,898 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,899 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,901 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,902 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,904 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,905 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,906 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,907 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,909 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,909 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,911 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,913 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,916 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,918 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-01 17:05:28,921 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,921 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-01 17:05:28,949 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-01 17:05:28,951 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-01 17:05:28,955 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-01 17:05:28,972 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-01 17:05:28,974 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-01 17:05:28,975 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-01 17:05:28,986 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-01 17:05:28,991 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-01 17:05:28,993 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-01 17:05:28,994 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-01 17:05:28,995 main DEBUG createAppenders(={Console})
2024-12-01 17:05:28,996 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 initialized
2024-12-01 17:05:28,996 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477
2024-12-01 17:05:28,996 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 OK.
2024-12-01 17:05:28,997 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-01 17:05:28,998 main DEBUG OutputStream closed
2024-12-01 17:05:28,998 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-01 17:05:28,998 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-01 17:05:28,999 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@5a56cdac OK
2024-12-01 17:05:29,212 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-01 17:05:29,217 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-01 17:05:29,219 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-01 17:05:29,220 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-01 17:05:29,221 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-01 17:05:29,222 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-01 17:05:29,222 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-01 17:05:29,223 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-01 17:05:29,223 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-01 17:05:29,224 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-01 17:05:29,224 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-01 17:05:29,225 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-01 17:05:29,225 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-01 17:05:29,226 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-01 17:05:29,226 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-01 17:05:29,227 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-01 17:05:29,227 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-01 17:05:29,229 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-01 17:05:29,240 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-01 17:05:29,242 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6f63b475) with optional ClassLoader: null
2024-12-01 17:05:29,242 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-01 17:05:29,243 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6f63b475] started OK.
2024-12-01T17:05:29,264 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins
2024-12-01 17:05:29,268 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-01 17:05:29,269 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-01T17:05:29,749 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f
2024-12-01T17:05:29,750 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins
2024-12-01T17:05:29,832 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-01T17:05:30,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-01T17:05:30,087 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74, deleteOnExit=true
2024-12-01T17:05:30,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-01T17:05:30,089 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/test.cache.data in system properties and HBase conf
2024-12-01T17:05:30,090 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.tmp.dir in system properties and HBase conf
2024-12-01T17:05:30,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir in system properties and HBase conf
2024-12-01T17:05:30,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-01T17:05:30,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-01T17:05:30,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-01T17:05:30,201 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-01T17:05:30,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-01T17:05:30,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-01T17:05:30,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-01T17:05:30,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-01T17:05:30,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-01T17:05:30,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-01T17:05:30,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-01T17:05:30,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-01T17:05:30,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-01T17:05:30,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/nfs.dump.dir in system properties and HBase conf
2024-12-01T17:05:30,213 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/java.io.tmpdir in system properties and HBase conf
2024-12-01T17:05:30,213 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-01T17:05:30,214 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-01T17:05:30,214 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-01T17:05:31,654 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-01T17:05:31,774 INFO [Time-limited test {}] log.Log(170): Logging initialized @4189ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-01T17:05:31,905 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-01T17:05:32,005 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-01T17:05:32,049 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-01T17:05:32,050 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-01T17:05:32,052 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-01T17:05:32,073 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-01T17:05:32,080 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37667246{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,AVAILABLE}
2024-12-01T17:05:32,082 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e7cf71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-01T17:05:32,314 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@79cacdbc{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/java.io.tmpdir/jetty-localhost-40833-hadoop-hdfs-3_4_1-tests_jar-_-any-402938103725674848/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-01T17:05:32,323 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39eef6d0{HTTP/1.1, (http/1.1)}{localhost:40833}
2024-12-01T17:05:32,323 INFO [Time-limited test {}] server.Server(415): Started @4739ms
2024-12-01T17:05:33,035 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-01T17:05:33,043 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-01T17:05:33,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-01T17:05:33,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-01T17:05:33,046 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-01T17:05:33,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6887c008{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,AVAILABLE}
2024-12-01T17:05:33,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@608fba0d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-01T17:05:33,176 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f198833{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/java.io.tmpdir/jetty-localhost-43843-hadoop-hdfs-3_4_1-tests_jar-_-any-11683260224233284781/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T17:05:33,177 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@451b769{HTTP/1.1, (http/1.1)}{localhost:43843}
2024-12-01T17:05:33,178 INFO [Time-limited test {}] server.Server(415): Started @5593ms
2024-12-01T17:05:33,249 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-01T17:05:33,448 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-01T17:05:33,455 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-01T17:05:33,461 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-01T17:05:33,461 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-01T17:05:33,461 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-01T17:05:33,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c7367e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,AVAILABLE}
2024-12-01T17:05:33,466 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5614c0c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-01T17:05:33,610 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1585fad{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/java.io.tmpdir/jetty-localhost-41601-hadoop-hdfs-3_4_1-tests_jar-_-any-12103409376148750041/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T17:05:33,611 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2eec8e07{HTTP/1.1, (http/1.1)}{localhost:41601}
2024-12-01T17:05:33,612 INFO [Time-limited test {}] server.Server(415): Started @6027ms
2024-12-01T17:05:33,615 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-01T17:05:33,691 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-01T17:05:33,697 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-01T17:05:33,699 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-01T17:05:33,700 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-01T17:05:33,700 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-01T17:05:33,701 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2bd013ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,AVAILABLE}
2024-12-01T17:05:33,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7aab5215{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-01T17:05:33,832 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5bae1b2c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/java.io.tmpdir/jetty-localhost-33229-hadoop-hdfs-3_4_1-tests_jar-_-any-7857526918178530127/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T17:05:33,833 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@19f9049{HTTP/1.1, (http/1.1)}{localhost:33229}
2024-12-01T17:05:33,834 INFO [Time-limited test {}] server.Server(415): Started @6249ms
2024-12-01T17:05:33,837 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-01T17:05:35,666 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1/current/BP-371634539-172.17.0.3-1733072730982/current, will proceed with Du for space computation calculation,
2024-12-01T17:05:35,666 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4/current/BP-371634539-172.17.0.3-1733072730982/current, will proceed with Du for space computation calculation,
2024-12-01T17:05:35,666 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2/current/BP-371634539-172.17.0.3-1733072730982/current, will proceed with Du for space computation calculation,
2024-12-01T17:05:35,666 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3/current/BP-371634539-172.17.0.3-1733072730982/current, will proceed with Du for space computation calculation,
2024-12-01T17:05:35,702 WARN [Thread-132 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5/current/BP-371634539-172.17.0.3-1733072730982/current, will proceed with Du for space computation calculation,
2024-12-01T17:05:35,702 WARN [Thread-133 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6/current/BP-371634539-172.17.0.3-1733072730982/current, will proceed with Du for space computation calculation,
2024-12-01T17:05:35,746 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-01T17:05:35,747 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-01T17:05:35,760 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-01T17:05:35,821 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25f98b68e458812a with lease ID 0x1f1f063a48c3160d: Processing first storage report for DS-b6e128c0-a4bb-411c-90ad-04cc93436444 from datanode DatanodeRegistration(127.0.0.1:44603, datanodeUuid=b8da1b8e-1d50-48ab-8b09-d6e13300af61, infoPort=44297, infoSecurePort=0, ipcPort=41295, storageInfo=lv=-57;cid=testClusterID;nsid=1276554394;c=1733072730982)
2024-12-01T17:05:35,823 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25f98b68e458812a with lease ID 0x1f1f063a48c3160d: from storage DS-b6e128c0-a4bb-411c-90ad-04cc93436444 node DatanodeRegistration(127.0.0.1:44603, datanodeUuid=b8da1b8e-1d50-48ab-8b09-d6e13300af61, infoPort=44297, infoSecurePort=0, ipcPort=41295, storageInfo=lv=-57;cid=testClusterID;nsid=1276554394;c=1733072730982), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-01T17:05:35,824 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8e991cb7fb4cfc08 with lease ID 0x1f1f063a48c3160f: Processing first storage report for DS-77e20430-c1a6-42bb-bd62-c245d594c058 from datanode DatanodeRegistration(127.0.0.1:36291, datanodeUuid=97eaf1fc-87b0-40ad-a118-b65f75a1b831, infoPort=34403, infoSecurePort=0, ipcPort=45669, storageInfo=lv=-57;cid=testClusterID;nsid=1276554394;c=1733072730982)
2024-12-01T17:05:35,824 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e991cb7fb4cfc08 with lease ID 0x1f1f063a48c3160f: from storage DS-77e20430-c1a6-42bb-bd62-c245d594c058 node DatanodeRegistration(127.0.0.1:36291, datanodeUuid=97eaf1fc-87b0-40ad-a118-b65f75a1b831, infoPort=34403, infoSecurePort=0, ipcPort=45669, storageInfo=lv=-57;cid=testClusterID;nsid=1276554394;c=1733072730982), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-01T17:05:35,824 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8e991cb7fb4cfc08 with lease ID 0x1f1f063a48c3160f: Processing first storage report for DS-363b9183-b242-4096-aa39-c42ed7f14241 from datanode DatanodeRegistration(127.0.0.1:36291, datanodeUuid=97eaf1fc-87b0-40ad-a118-b65f75a1b831, infoPort=34403, infoSecurePort=0, ipcPort=45669, storageInfo=lv=-57;cid=testClusterID;nsid=1276554394;c=1733072730982)
2024-12-01T17:05:35,824 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e991cb7fb4cfc08 with lease ID 0x1f1f063a48c3160f: from storage DS-363b9183-b242-4096-aa39-c42ed7f14241 node DatanodeRegistration(127.0.0.1:36291, datanodeUuid=97eaf1fc-87b0-40ad-a118-b65f75a1b831, infoPort=34403, infoSecurePort=0, ipcPort=45669, storageInfo=lv=-57;cid=testClusterID;nsid=1276554394;c=1733072730982), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-01T17:05:35,825 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25f98b68e458812a with lease ID 0x1f1f063a48c3160d: Processing first storage report for DS-5b6a2cc6-a5f2-40bb-9472-52c68c20a058 from datanode DatanodeRegistration(127.0.0.1:44603, datanodeUuid=b8da1b8e-1d50-48ab-8b09-d6e13300af61, infoPort=44297, infoSecurePort=0, ipcPort=41295, storageInfo=lv=-57;cid=testClusterID;nsid=1276554394;c=1733072730982)
2024-12-01T17:05:35,825 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25f98b68e458812a with lease ID 0x1f1f063a48c3160d: from storage DS-5b6a2cc6-a5f2-40bb-9472-52c68c20a058 node DatanodeRegistration(127.0.0.1:44603, datanodeUuid=b8da1b8e-1d50-48ab-8b09-d6e13300af61, infoPort=44297, infoSecurePort=0, ipcPort=41295, storageInfo=lv=-57;cid=testClusterID;nsid=1276554394;c=1733072730982), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-01T17:05:35,830 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x80e1d80ed48c09f with lease ID 0x1f1f063a48c3160e: Processing first storage report for DS-66fd9c32-3ad4-437a-8b5c-d300efc8d3c7 from datanode DatanodeRegistration(127.0.0.1:36379, datanodeUuid=a892d38a-cd18-41ed-bb98-de9c143d2f76, infoPort=45899, infoSecurePort=0, ipcPort=38135, storageInfo=lv=-57;cid=testClusterID;nsid=1276554394;c=1733072730982)
2024-12-01T17:05:35,830 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x80e1d80ed48c09f with lease ID 0x1f1f063a48c3160e: from storage DS-66fd9c32-3ad4-437a-8b5c-d300efc8d3c7 node DatanodeRegistration(127.0.0.1:36379, datanodeUuid=a892d38a-cd18-41ed-bb98-de9c143d2f76, infoPort=45899, infoSecurePort=0, ipcPort=38135, storageInfo=lv=-57;cid=testClusterID;nsid=1276554394;c=1733072730982), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-01T17:05:35,831 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x80e1d80ed48c09f with lease ID 0x1f1f063a48c3160e: Processing first storage report for DS-062b3ab2-f9d9-40cf-be04-35bf2904e23e from datanode DatanodeRegistration(127.0.0.1:36379, datanodeUuid=a892d38a-cd18-41ed-bb98-de9c143d2f76, infoPort=45899, infoSecurePort=0, ipcPort=38135, storageInfo=lv=-57;cid=testClusterID;nsid=1276554394;c=1733072730982)
2024-12-01T17:05:35,831 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x80e1d80ed48c09f with lease ID 0x1f1f063a48c3160e: from storage DS-062b3ab2-f9d9-40cf-be04-35bf2904e23e node DatanodeRegistration(127.0.0.1:36379, datanodeUuid=a892d38a-cd18-41ed-bb98-de9c143d2f76, infoPort=45899, infoSecurePort=0, ipcPort=38135, storageInfo=lv=-57;cid=testClusterID;nsid=1276554394;c=1733072730982), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-01T17:05:35,859 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f
2024-12-01T17:05:35,966 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/zookeeper_0, clientPort=56318, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-01T17:05:35,983 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56318
2024-12-01T17:05:36,007 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T17:05:36,013 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T17:05:36,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741825_1001 (size=7)
2024-12-01T17:05:36,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741825_1001 (size=7)
2024-12-01T17:05:36,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741825_1001 (size=7)
2024-12-01T17:05:36,681 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 with version=8
2024-12-01T17:05:36,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/hbase-staging
2024-12-01T17:05:36,811 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-01T17:05:37,103 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b87883a7882d:0 server-side Connection retries=45
2024-12-01T17:05:37,113 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-01T17:05:37,114 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-01T17:05:37,120 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-01T17:05:37,120 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-01T17:05:37,120 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-01T17:05:37,278 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-01T17:05:37,334 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-01T17:05:37,342 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-01T17:05:37,345 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-01T17:05:37,370 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 116604 (auto-detected)
2024-12-01T17:05:37,371 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected)
2024-12-01T17:05:37,390 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38737
2024-12-01T17:05:37,413 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38737 connecting to ZooKeeper ensemble=127.0.0.1:56318
2024-12-01T17:05:37,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:387370x0, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-01T17:05:37,496 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38737-0x10191db4a6c0000 connected
2024-12-01T17:05:37,594 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T17:05:37,598 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T17:05:37,610 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-01T17:05:37,615 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546, hbase.cluster.distributed=false
2024-12-01T17:05:37,664 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-01T17:05:37,670 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38737
2024-12-01T17:05:37,671 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38737
2024-12-01T17:05:37,672 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38737
2024-12-01T17:05:37,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38737
2024-12-01T17:05:37,676 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38737
2024-12-01T17:05:37,781 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b87883a7882d:0 server-side Connection retries=45
2024-12-01T17:05:37,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-01T17:05:37,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-01T17:05:37,783 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-01T17:05:37,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-01T17:05:37,784 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-01T17:05:37,786 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-01T17:05:37,789 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-01T17:05:37,796 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44753
2024-12-01T17:05:37,799 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44753 connecting to ZooKeeper ensemble=127.0.0.1:56318
2024-12-01T17:05:37,801 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T17:05:37,807 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T17:05:37,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:447530x0, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-01T17:05:37,835 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:447530x0, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-01T17:05:37,836 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44753-0x10191db4a6c0001 connected
2024-12-01T17:05:37,840 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-01T17:05:37,848 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-01T17:05:37,852 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-01T17:05:37,859 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-01T17:05:37,860 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44753
2024-12-01T17:05:37,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44753
2024-12-01T17:05:37,865 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44753
2024-12-01T17:05:37,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44753
2024-12-01T17:05:37,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44753
2024-12-01T17:05:37,896 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b87883a7882d:0 server-side Connection retries=45
2024-12-01T17:05:37,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-01T17:05:37,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-01T17:05:37,897 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-01T17:05:37,897 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-01T17:05:37,897 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-01T17:05:37,898 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-01T17:05:37,898 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-01T17:05:37,901 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36521
2024-12-01T17:05:37,902 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36521 connecting to ZooKeeper ensemble=127.0.0.1:56318
2024-12-01T17:05:37,904 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T17:05:37,908 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T17:05:37,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:365210x0, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-01T17:05:37,919 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:365210x0, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-01T17:05:37,920 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-01T17:05:37,925 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36521-0x10191db4a6c0002 connected
2024-12-01T17:05:37,932 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-01T17:05:37,934 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-01T17:05:37,936 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-01T17:05:37,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36521
2024-12-01T17:05:37,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36521
2024-12-01T17:05:37,947 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36521
2024-12-01T17:05:37,948 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36521
2024-12-01T17:05:37,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36521
2024-12-01T17:05:37,967 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b87883a7882d:0 server-side Connection retries=45
2024-12-01T17:05:37,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-01T17:05:37,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-01T17:05:37,968 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-01T17:05:37,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-01T17:05:37,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-01T17:05:37,968 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-01T17:05:37,969 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-01T17:05:37,970 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41375
2024-12-01T17:05:37,971 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41375 connecting to ZooKeeper ensemble=127.0.0.1:56318
2024-12-01T17:05:37,973 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T17:05:37,975 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T17:05:37,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413750x0, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T17:05:37,993 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:413750x0, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T17:05:37,993 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41375-0x10191db4a6c0003 connected 2024-12-01T17:05:37,994 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T17:05:37,995 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T17:05:37,996 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T17:05:37,998 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T17:05:38,000 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41375 2024-12-01T17:05:38,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41375 2024-12-01T17:05:38,003 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41375 2024-12-01T17:05:38,004 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41375 2024-12-01T17:05:38,005 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41375 2024-12-01T17:05:38,027 DEBUG [M:0;b87883a7882d:38737 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b87883a7882d:38737 2024-12-01T17:05:38,029 INFO [master/b87883a7882d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b87883a7882d,38737,1733072736902 2024-12-01T17:05:38,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T17:05:38,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T17:05:38,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T17:05:38,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T17:05:38,047 DEBUG 
[master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b87883a7882d,38737,1733072736902 2024-12-01T17:05:38,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T17:05:38,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T17:05:38,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:38,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:38,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:38,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T17:05:38,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:38,080 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T17:05:38,081 INFO [master/b87883a7882d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b87883a7882d,38737,1733072736902 from backup master directory 2024-12-01T17:05:38,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T17:05:38,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b87883a7882d,38737,1733072736902 2024-12-01T17:05:38,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T17:05:38,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T17:05:38,096 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T17:05:38,097 WARN [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T17:05:38,097 INFO [master/b87883a7882d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b87883a7882d,38737,1733072736902 2024-12-01T17:05:38,099 INFO [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-01T17:05:38,101 INFO [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-01T17:05:38,167 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/hbase.id] with ID: 75930971-73b3-4a63-a3bb-6db1d7d1d483 2024-12-01T17:05:38,167 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.tmp/hbase.id 2024-12-01T17:05:38,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741826_1002 (size=42) 2024-12-01T17:05:38,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741826_1002 (size=42) 2024-12-01T17:05:38,197 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.tmp/hbase.id]:[hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/hbase.id] 2024-12-01T17:05:38,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741826_1002 (size=42) 2024-12-01T17:05:38,246 INFO [master/b87883a7882d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T17:05:38,251 INFO [master/b87883a7882d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-01T17:05:38,271 INFO [master/b87883a7882d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 
2024-12-01T17:05:38,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:38,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:38,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:38,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:38,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741827_1003 (size=196) 2024-12-01T17:05:38,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741827_1003 (size=196) 2024-12-01T17:05:38,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741827_1003 (size=196) 2024-12-01T17:05:38,327 INFO [master/b87883a7882d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:05:38,329 INFO [master/b87883a7882d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T17:05:38,345 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T17:05:38,350 INFO [master/b87883a7882d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T17:05:38,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741828_1004 (size=1189) 2024-12-01T17:05:38,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741828_1004 (size=1189) 2024-12-01T17:05:38,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741828_1004 (size=1189) 2024-12-01T17:05:38,802 INFO [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/data/master/store 2024-12-01T17:05:38,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741829_1005 (size=34) 2024-12-01T17:05:38,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741829_1005 (size=34) 2024-12-01T17:05:38,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741829_1005 (size=34) 2024-12-01T17:05:38,831 INFO [master/b87883a7882d:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-01T17:05:38,834 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:38,835 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T17:05:38,836 INFO [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T17:05:38,836 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T17:05:38,837 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T17:05:38,837 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T17:05:38,838 INFO [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T17:05:38,839 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733072738835Disabling compacts and flushes for region at 1733072738835Disabling writes for close at 1733072738837 (+2 ms)Writing region close event to WAL at 1733072738838 (+1 ms)Closed at 1733072738838 2024-12-01T17:05:38,841 WARN [master/b87883a7882d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/data/master/store/.initializing 2024-12-01T17:05:38,841 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/WALs/b87883a7882d,38737,1733072736902 2024-12-01T17:05:38,851 INFO [master/b87883a7882d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T17:05:38,872 INFO [master/b87883a7882d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b87883a7882d%2C38737%2C1733072736902, suffix=, logDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/WALs/b87883a7882d,38737,1733072736902, archiveDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/oldWALs, maxLogs=10 2024-12-01T17:05:38,901 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/WALs/b87883a7882d,38737,1733072736902/b87883a7882d%2C38737%2C1733072736902.1733072738878, exclude list is [], retry=0 2024-12-01T17:05:38,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:36379,DS-66fd9c32-3ad4-437a-8b5c-d300efc8d3c7,DISK] 2024-12-01T17:05:38,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44603,DS-b6e128c0-a4bb-411c-90ad-04cc93436444,DISK] 2024-12-01T17:05:38,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-01T17:05:38,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36291,DS-77e20430-c1a6-42bb-bd62-c245d594c058,DISK] 2024-12-01T17:05:38,995 INFO [master/b87883a7882d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/WALs/b87883a7882d,38737,1733072736902/b87883a7882d%2C38737%2C1733072736902.1733072738878 2024-12-01T17:05:39,010 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45899:45899),(127.0.0.1/127.0.0.1:44297:44297),(127.0.0.1/127.0.0.1:34403:34403)] 2024-12-01T17:05:39,011 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T17:05:39,012 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:39,015 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T17:05:39,016 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T17:05:39,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T17:05:39,094 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T17:05:39,100 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:39,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T17:05:39,106 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T17:05:39,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T17:05:39,119 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:39,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:05:39,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T17:05:39,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T17:05:39,127 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:39,128 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:05:39,129 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T17:05:39,134 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T17:05:39,134 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:39,136 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:05:39,136 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T17:05:39,141 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T17:05:39,142 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T17:05:39,148 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T17:05:39,149 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T17:05:39,153 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-01T17:05:39,158 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T17:05:39,162 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:05:39,164 INFO [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62939683, jitterRate=-0.062125638127326965}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T17:05:39,171 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733072739030Initializing all the Stores at 1733072739033 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733072739033Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072739035 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072739035Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072739035Cleaning up temporary data from old regions at 1733072739149 (+114 ms)Region opened successfully at 1733072739170 (+21 ms) 2024-12-01T17:05:39,174 INFO [master/b87883a7882d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T17:05:39,209 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3de3a2ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b87883a7882d/172.17.0.3:0 2024-12-01T17:05:39,255 INFO [master/b87883a7882d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-01T17:05:39,288 INFO [master/b87883a7882d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T17:05:39,288 INFO [master/b87883a7882d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T17:05:39,299 INFO [master/b87883a7882d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-01T17:05:39,308 INFO [master/b87883a7882d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 8 msec 2024-12-01T17:05:39,316 INFO [master/b87883a7882d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 8 msec 2024-12-01T17:05:39,316 INFO [master/b87883a7882d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T17:05:39,366 INFO [master/b87883a7882d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-01T17:05:39,381 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T17:05:39,401 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-01T17:05:39,404 INFO [master/b87883a7882d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T17:05:39,418 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T17:05:39,432 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-01T17:05:39,435 INFO [master/b87883a7882d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T17:05:39,441 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T17:05:39,453 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-01T17:05:39,455 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T17:05:39,464 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T17:05:39,481 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T17:05:39,485 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T17:05:39,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T17:05:39,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T17:05:39,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:39,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:39,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T17:05:39,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T17:05:39,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:39,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:39,499 INFO [master/b87883a7882d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b87883a7882d,38737,1733072736902, sessionid=0x10191db4a6c0000, setting cluster-up flag (Was=false) 2024-12-01T17:05:39,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:39,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:39,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:39,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-01T17:05:39,559 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T17:05:39,561 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b87883a7882d,38737,1733072736902 2024-12-01T17:05:39,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:39,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:39,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:39,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:39,611 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T17:05:39,614 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b87883a7882d,38737,1733072736902 2024-12-01T17:05:39,620 INFO [master/b87883a7882d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-01T17:05:39,647 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-01T17:05:39,652 INFO [master/b87883a7882d:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:05:39,652 INFO [master/b87883a7882d:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-12-01T17:05:39,693 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-01T17:05:39,701 INFO [master/b87883a7882d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-01T17:05:39,707 INFO [master/b87883a7882d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-01T17:05:39,715 INFO [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(746): ClusterId : 75930971-73b3-4a63-a3bb-6db1d7d1d483 2024-12-01T17:05:39,715 INFO [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(746): ClusterId : 75930971-73b3-4a63-a3bb-6db1d7d1d483 2024-12-01T17:05:39,715 INFO [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(746): ClusterId : 75930971-73b3-4a63-a3bb-6db1d7d1d483 2024-12-01T17:05:39,714 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b87883a7882d,38737,1733072736902 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T17:05:39,718 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T17:05:39,718 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T17:05:39,724 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T17:05:39,739 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T17:05:39,739 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T17:05:39,739 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T17:05:39,740 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T17:05:39,740 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T17:05:39,740 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T17:05:39,740 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b87883a7882d:0, corePoolSize=5, maxPoolSize=5 2024-12-01T17:05:39,740 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b87883a7882d:0, 
corePoolSize=5, maxPoolSize=5 2024-12-01T17:05:39,741 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b87883a7882d:0, corePoolSize=5, maxPoolSize=5 2024-12-01T17:05:39,741 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b87883a7882d:0, corePoolSize=5, maxPoolSize=5 2024-12-01T17:05:39,741 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b87883a7882d:0, corePoolSize=10, maxPoolSize=10 2024-12-01T17:05:39,741 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:39,741 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b87883a7882d:0, corePoolSize=2, maxPoolSize=2 2024-12-01T17:05:39,741 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:39,743 INFO [master/b87883a7882d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733072769742 2024-12-01T17:05:39,744 INFO [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T17:05:39,745 INFO [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T17:05:39,746 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T17:05:39,747 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-01T17:05:39,749 INFO [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T17:05:39,750 INFO [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T17:05:39,750 INFO [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T17:05:39,750 INFO [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T17:05:39,751 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-01T17:05:39,753 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:39,753 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T17:05:39,756 INFO [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T17:05:39,758 INFO [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T17:05:39,758 INFO [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T17:05:39,760 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T17:05:39,760 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T17:05:39,760 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T17:05:39,760 DEBUG [RS:1;b87883a7882d:36521 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c96d143, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b87883a7882d/172.17.0.3:0 2024-12-01T17:05:39,760 DEBUG [RS:0;b87883a7882d:44753 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15db67e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b87883a7882d/172.17.0.3:0 2024-12-01T17:05:39,760 DEBUG [RS:2;b87883a7882d:41375 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ba99d3a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b87883a7882d/172.17.0.3:0 2024-12-01T17:05:39,762 INFO [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T17:05:39,763 INFO [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T17:05:39,768 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b87883a7882d:0:becomeActiveMaster-HFileCleaner.large.0-1733072739764,5,FailOnTimeoutGroup] 2024-12-01T17:05:39,770 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b87883a7882d:0:becomeActiveMaster-HFileCleaner.small.0-1733072739768,5,FailOnTimeoutGroup] 2024-12-01T17:05:39,770 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:39,771 INFO [master/b87883a7882d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T17:05:39,772 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:39,773 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-01T17:05:39,774 DEBUG [RS:0;b87883a7882d:44753 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b87883a7882d:44753 2024-12-01T17:05:39,775 DEBUG [RS:1;b87883a7882d:36521 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b87883a7882d:36521 2024-12-01T17:05:39,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741831_1007 (size=1321) 2024-12-01T17:05:39,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741831_1007 (size=1321) 2024-12-01T17:05:39,777 DEBUG [RS:2;b87883a7882d:41375 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;b87883a7882d:41375 2024-12-01T17:05:39,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741831_1007 (size=1321) 2024-12-01T17:05:39,779 INFO [RS:1;b87883a7882d:36521 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T17:05:39,779 INFO [RS:0;b87883a7882d:44753 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T17:05:39,779 INFO [RS:2;b87883a7882d:41375 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T17:05:39,779 INFO [RS:0;b87883a7882d:44753 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T17:05:39,779 INFO [RS:2;b87883a7882d:41375 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T17:05:39,779 INFO [RS:1;b87883a7882d:36521 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T17:05:39,779 DEBUG [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-01T17:05:39,779 DEBUG [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-01T17:05:39,780 INFO [RS:0;b87883a7882d:44753 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:05:39,780 INFO [RS:1;b87883a7882d:36521 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:05:39,780 DEBUG [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-01T17:05:39,780 DEBUG [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T17:05:39,780 DEBUG [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T17:05:39,780 INFO [RS:2;b87883a7882d:41375 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:05:39,780 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-01T17:05:39,780 DEBUG [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T17:05:39,780 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:05:39,784 INFO [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(2659): reportForDuty to master=b87883a7882d,38737,1733072736902 with port=44753, startcode=1733072737741 2024-12-01T17:05:39,784 INFO [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(2659): reportForDuty to master=b87883a7882d,38737,1733072736902 with port=36521, startcode=1733072737896 2024-12-01T17:05:39,784 INFO [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(2659): reportForDuty to master=b87883a7882d,38737,1733072736902 with port=41375, startcode=1733072737967 2024-12-01T17:05:39,795 DEBUG [RS:1;b87883a7882d:36521 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T17:05:39,795 DEBUG [RS:2;b87883a7882d:41375 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T17:05:39,795 DEBUG [RS:0;b87883a7882d:44753 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T17:05:39,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741832_1008 (size=32) 2024-12-01T17:05:39,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741832_1008 
(size=32) 2024-12-01T17:05:39,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741832_1008 (size=32) 2024-12-01T17:05:39,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:39,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T17:05:39,818 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T17:05:39,819 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:39,821 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T17:05:39,826 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T17:05:39,834 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T17:05:39,835 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:39,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T17:05:39,836 INFO 
[HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34647, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T17:05:39,836 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55337, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T17:05:39,837 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50775, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T17:05:39,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T17:05:39,842 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T17:05:39,842 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:39,843 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38737 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b87883a7882d,44753,1733072737741 2024-12-01T17:05:39,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T17:05:39,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T17:05:39,846 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38737 {}] master.ServerManager(517): Registering regionserver=b87883a7882d,44753,1733072737741 2024-12-01T17:05:39,847 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor 
true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T17:05:39,848 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:39,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T17:05:39,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T17:05:39,852 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740 2024-12-01T17:05:39,853 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740 2024-12-01T17:05:39,858 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T17:05:39,859 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T17:05:39,863 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38737 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b87883a7882d,41375,1733072737967 2024-12-01T17:05:39,863 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38737 {}] master.ServerManager(517): Registering regionserver=b87883a7882d,41375,1733072737967 2024-12-01T17:05:39,867 DEBUG [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:05:39,867 DEBUG [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44451 2024-12-01T17:05:39,867 DEBUG [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T17:05:39,869 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38737 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b87883a7882d,36521,1733072737896 2024-12-01T17:05:39,869 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38737 {}] master.ServerManager(517): Registering regionserver=b87883a7882d,36521,1733072737896 2024-12-01T17:05:39,872 DEBUG [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:05:39,872 DEBUG [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44451 2024-12-01T17:05:39,872 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-01T17:05:39,872 DEBUG [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T17:05:39,874 DEBUG [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:05:39,874 DEBUG [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44451 2024-12-01T17:05:39,874 DEBUG [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T17:05:39,876 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T17:05:39,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T17:05:39,889 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:05:39,891 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70843802, jitterRate=0.05565491318702698}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T17:05:39,893 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733072739807Initializing all the Stores at 1733072739810 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733072739810Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733072739812 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072739812Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733072739812Cleaning up temporary data from old regions at 1733072739859 (+47 ms)Region opened successfully at 1733072739893 (+34 ms) 2024-12-01T17:05:39,893 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling 
compactions & flushes 2024-12-01T17:05:39,893 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T17:05:39,893 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T17:05:39,893 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T17:05:39,893 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T17:05:39,895 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T17:05:39,895 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733072739893Disabling compacts and flushes for region at 1733072739893Disabling writes for close at 1733072739893Writing region close event to WAL at 1733072739894 (+1 ms)Closed at 1733072739895 (+1 ms) 2024-12-01T17:05:39,898 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T17:05:39,898 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-01T17:05:39,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T17:05:39,914 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T17:05:39,919 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T17:05:39,978 DEBUG [RS:0;b87883a7882d:44753 {}] zookeeper.ZKUtil(111): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b87883a7882d,44753,1733072737741 2024-12-01T17:05:39,978 WARN [RS:0;b87883a7882d:44753 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T17:05:39,978 INFO [RS:0;b87883a7882d:44753 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T17:05:39,979 DEBUG [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,44753,1733072737741 2024-12-01T17:05:39,979 DEBUG [RS:2;b87883a7882d:41375 {}] zookeeper.ZKUtil(111): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b87883a7882d,41375,1733072737967 2024-12-01T17:05:39,979 WARN [RS:2;b87883a7882d:41375 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T17:05:39,979 DEBUG [RS:1;b87883a7882d:36521 {}] zookeeper.ZKUtil(111): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b87883a7882d,36521,1733072737896 2024-12-01T17:05:39,979 WARN [RS:1;b87883a7882d:36521 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T17:05:39,979 INFO [RS:2;b87883a7882d:41375 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T17:05:39,979 INFO [RS:1;b87883a7882d:36521 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T17:05:39,979 DEBUG [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,41375,1733072737967 2024-12-01T17:05:39,979 DEBUG [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,36521,1733072737896 2024-12-01T17:05:39,980 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b87883a7882d,44753,1733072737741] 2024-12-01T17:05:39,980 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b87883a7882d,41375,1733072737967] 2024-12-01T17:05:39,980 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b87883a7882d,36521,1733072737896] 2024-12-01T17:05:40,011 INFO [RS:2;b87883a7882d:41375 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T17:05:40,011 INFO [RS:0;b87883a7882d:44753 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T17:05:40,011 INFO [RS:1;b87883a7882d:36521 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T17:05:40,025 INFO [RS:0;b87883a7882d:44753 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T17:05:40,026 INFO [RS:1;b87883a7882d:36521 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T17:05:40,030 INFO [RS:2;b87883a7882d:41375 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T17:05:40,037 INFO [RS:1;b87883a7882d:36521 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T17:05:40,037 INFO [RS:2;b87883a7882d:41375 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T17:05:40,037 INFO [RS:0;b87883a7882d:44753 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T17:05:40,037 INFO [RS:2;b87883a7882d:41375 {}] 
hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,037 INFO [RS:1;b87883a7882d:36521 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,037 INFO [RS:0;b87883a7882d:44753 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,042 INFO [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T17:05:40,042 INFO [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T17:05:40,042 INFO [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T17:05:40,049 INFO [RS:1;b87883a7882d:36521 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T17:05:40,049 INFO [RS:0;b87883a7882d:44753 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T17:05:40,049 INFO [RS:2;b87883a7882d:41375 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T17:05:40,050 INFO [RS:1;b87883a7882d:36521 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,050 INFO [RS:2;b87883a7882d:41375 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,050 INFO [RS:0;b87883a7882d:44753 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-01T17:05:40,051 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b87883a7882d:0, corePoolSize=2, maxPoolSize=2 2024-12-01T17:05:40,051 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b87883a7882d:0, corePoolSize=2, maxPoolSize=2 
2024-12-01T17:05:40,051 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,051 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b87883a7882d:0, corePoolSize=2, maxPoolSize=2 2024-12-01T17:05:40,052 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0, corePoolSize=3, maxPoolSize=3 2024-12-01T17:05:40,052 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:1;b87883a7882d:36521 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b87883a7882d:0, corePoolSize=3, maxPoolSize=3 2024-12-01T17:05:40,052 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): 
Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,052 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b87883a7882d:0, corePoolSize=1, maxPoolSize=1 2024-12-01T17:05:40,053 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0, corePoolSize=3, maxPoolSize=3 2024-12-01T17:05:40,053 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0, corePoolSize=3, maxPoolSize=3 2024-12-01T17:05:40,053 DEBUG [RS:2;b87883a7882d:41375 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b87883a7882d:0, corePoolSize=3, maxPoolSize=3 2024-12-01T17:05:40,053 DEBUG [RS:0;b87883a7882d:44753 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b87883a7882d:0, corePoolSize=3, maxPoolSize=3 2024-12-01T17:05:40,060 INFO [RS:1;b87883a7882d:36521 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,060 INFO [RS:2;b87883a7882d:41375 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,060 INFO [RS:0;b87883a7882d:44753 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:0;b87883a7882d:44753 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:2;b87883a7882d:41375 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:1;b87883a7882d:36521 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:2;b87883a7882d:41375 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:0;b87883a7882d:44753 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:1;b87883a7882d:36521 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:2;b87883a7882d:41375 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:0;b87883a7882d:44753 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:1;b87883a7882d:36521 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-01T17:05:40,061 INFO [RS:0;b87883a7882d:44753 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:2;b87883a7882d:41375 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:1;b87883a7882d:36521 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:2;b87883a7882d:41375 {}] hbase.ChoreService(168): Chore ScheduledChore name=b87883a7882d,41375,1733072737967-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:0;b87883a7882d:44753 {}] hbase.ChoreService(168): Chore ScheduledChore name=b87883a7882d,44753,1733072737741-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T17:05:40,061 INFO [RS:1;b87883a7882d:36521 {}] hbase.ChoreService(168): Chore ScheduledChore name=b87883a7882d,36521,1733072737896-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T17:05:40,070 WARN [b87883a7882d:38737 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-01T17:05:40,080 INFO [RS:2;b87883a7882d:41375 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T17:05:40,080 INFO [RS:0;b87883a7882d:44753 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T17:05:40,083 INFO [RS:0;b87883a7882d:44753 {}] hbase.ChoreService(168): Chore ScheduledChore name=b87883a7882d,44753,1733072737741-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,083 INFO [RS:2;b87883a7882d:41375 {}] hbase.ChoreService(168): Chore ScheduledChore name=b87883a7882d,41375,1733072737967-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,083 INFO [RS:0;b87883a7882d:44753 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,083 INFO [RS:2;b87883a7882d:41375 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,083 INFO [RS:0;b87883a7882d:44753 {}] regionserver.Replication(171): b87883a7882d,44753,1733072737741 started 2024-12-01T17:05:40,083 INFO [RS:2;b87883a7882d:41375 {}] regionserver.Replication(171): b87883a7882d,41375,1733072737967 started 2024-12-01T17:05:40,085 INFO [RS:1;b87883a7882d:36521 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T17:05:40,085 INFO [RS:1;b87883a7882d:36521 {}] hbase.ChoreService(168): Chore ScheduledChore name=b87883a7882d,36521,1733072737896-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,085 INFO [RS:1;b87883a7882d:36521 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,086 INFO [RS:1;b87883a7882d:36521 {}] regionserver.Replication(171): b87883a7882d,36521,1733072737896 started 2024-12-01T17:05:40,102 INFO [RS:2;b87883a7882d:41375 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T17:05:40,102 INFO [RS:1;b87883a7882d:36521 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:40,103 INFO [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(1482): Serving as b87883a7882d,41375,1733072737967, RpcServer on b87883a7882d/172.17.0.3:41375, sessionid=0x10191db4a6c0003 2024-12-01T17:05:40,103 INFO [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(1482): Serving as b87883a7882d,36521,1733072737896, RpcServer on b87883a7882d/172.17.0.3:36521, sessionid=0x10191db4a6c0002 2024-12-01T17:05:40,104 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T17:05:40,104 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T17:05:40,104 DEBUG [RS:1;b87883a7882d:36521 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b87883a7882d,36521,1733072737896 2024-12-01T17:05:40,104 DEBUG [RS:2;b87883a7882d:41375 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b87883a7882d,41375,1733072737967 2024-12-01T17:05:40,105 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b87883a7882d,41375,1733072737967' 2024-12-01T17:05:40,105 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b87883a7882d,36521,1733072737896' 2024-12-01T17:05:40,105 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T17:05:40,105 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T17:05:40,106 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T17:05:40,106 INFO [RS:0;b87883a7882d:44753 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T17:05:40,106 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T17:05:40,106 INFO [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(1482): Serving as b87883a7882d,44753,1733072737741, RpcServer on b87883a7882d/172.17.0.3:44753, sessionid=0x10191db4a6c0001 2024-12-01T17:05:40,106 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T17:05:40,106 DEBUG [RS:0;b87883a7882d:44753 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b87883a7882d,44753,1733072737741 2024-12-01T17:05:40,106 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b87883a7882d,44753,1733072737741' 2024-12-01T17:05:40,106 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T17:05:40,107 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T17:05:40,107 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T17:05:40,107 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T17:05:40,107 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T17:05:40,107 DEBUG [RS:1;b87883a7882d:36521 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b87883a7882d,36521,1733072737896 2024-12-01T17:05:40,107 DEBUG [RS:2;b87883a7882d:41375 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b87883a7882d,41375,1733072737967 2024-12-01T17:05:40,107 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b87883a7882d,36521,1733072737896' 2024-12-01T17:05:40,107 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b87883a7882d,41375,1733072737967' 2024-12-01T17:05:40,107 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T17:05:40,107 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T17:05:40,107 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T17:05:40,107 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T17:05:40,108 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T17:05:40,108 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T17:05:40,108 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T17:05:40,108 DEBUG [RS:0;b87883a7882d:44753 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b87883a7882d,44753,1733072737741 2024-12-01T17:05:40,108 
DEBUG [RS:0;b87883a7882d:44753 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b87883a7882d,44753,1733072737741' 2024-12-01T17:05:40,108 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T17:05:40,108 DEBUG [RS:2;b87883a7882d:41375 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T17:05:40,108 INFO [RS:2;b87883a7882d:41375 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T17:05:40,108 DEBUG [RS:1;b87883a7882d:36521 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T17:05:40,108 INFO [RS:1;b87883a7882d:36521 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T17:05:40,108 INFO [RS:2;b87883a7882d:41375 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T17:05:40,108 INFO [RS:1;b87883a7882d:36521 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T17:05:40,109 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T17:05:40,109 DEBUG [RS:0;b87883a7882d:44753 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T17:05:40,109 INFO [RS:0;b87883a7882d:44753 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T17:05:40,109 INFO [RS:0;b87883a7882d:44753 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T17:05:40,215 INFO [RS:1;b87883a7882d:36521 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T17:05:40,215 INFO [RS:2;b87883a7882d:41375 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T17:05:40,215 INFO [RS:0;b87883a7882d:44753 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T17:05:40,219 INFO [RS:0;b87883a7882d:44753 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b87883a7882d%2C44753%2C1733072737741, suffix=, logDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,44753,1733072737741, archiveDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/oldWALs, maxLogs=32 2024-12-01T17:05:40,219 INFO [RS:1;b87883a7882d:36521 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b87883a7882d%2C36521%2C1733072737896, suffix=, logDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,36521,1733072737896, archiveDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/oldWALs, maxLogs=32 2024-12-01T17:05:40,219 INFO [RS:2;b87883a7882d:41375 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b87883a7882d%2C41375%2C1733072737967, suffix=, logDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,41375,1733072737967, archiveDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/oldWALs, maxLogs=32 2024-12-01T17:05:40,240 DEBUG [RS:1;b87883a7882d:36521 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create 
output stream for /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,36521,1733072737896/b87883a7882d%2C36521%2C1733072737896.1733072740223, exclude list is [], retry=0 2024-12-01T17:05:40,241 DEBUG [RS:0;b87883a7882d:44753 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,44753,1733072737741/b87883a7882d%2C44753%2C1733072737741.1733072740223, exclude list is [], retry=0 2024-12-01T17:05:40,246 DEBUG [RS:2;b87883a7882d:41375 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,41375,1733072737967/b87883a7882d%2C41375%2C1733072737967.1733072740223, exclude list is [], retry=0 2024-12-01T17:05:40,251 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36379,DS-66fd9c32-3ad4-437a-8b5c-d300efc8d3c7,DISK] 2024-12-01T17:05:40,251 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36291,DS-77e20430-c1a6-42bb-bd62-c245d594c058,DISK] 2024-12-01T17:05:40,251 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44603,DS-b6e128c0-a4bb-411c-90ad-04cc93436444,DISK] 2024-12-01T17:05:40,251 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44603,DS-b6e128c0-a4bb-411c-90ad-04cc93436444,DISK] 2024-12-01T17:05:40,251 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36291,DS-77e20430-c1a6-42bb-bd62-c245d594c058,DISK] 2024-12-01T17:05:40,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36379,DS-66fd9c32-3ad4-437a-8b5c-d300efc8d3c7,DISK] 2024-12-01T17:05:40,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44603,DS-b6e128c0-a4bb-411c-90ad-04cc93436444,DISK] 2024-12-01T17:05:40,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36291,DS-77e20430-c1a6-42bb-bd62-c245d594c058,DISK] 2024-12-01T17:05:40,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36379,DS-66fd9c32-3ad4-437a-8b5c-d300efc8d3c7,DISK] 2024-12-01T17:05:40,299 INFO [RS:1;b87883a7882d:36521 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,36521,1733072737896/b87883a7882d%2C36521%2C1733072737896.1733072740223 2024-12-01T17:05:40,300 DEBUG [RS:1;b87883a7882d:36521 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45899:45899),(127.0.0.1/127.0.0.1:34403:34403),(127.0.0.1/127.0.0.1:44297:44297)] 2024-12-01T17:05:40,301 INFO [RS:0;b87883a7882d:44753 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,44753,1733072737741/b87883a7882d%2C44753%2C1733072737741.1733072740223 2024-12-01T17:05:40,305 DEBUG [RS:0;b87883a7882d:44753 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34403:34403),(127.0.0.1/127.0.0.1:44297:44297),(127.0.0.1/127.0.0.1:45899:45899)] 2024-12-01T17:05:40,306 INFO [RS:2;b87883a7882d:41375 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,41375,1733072737967/b87883a7882d%2C41375%2C1733072737967.1733072740223 2024-12-01T17:05:40,307 DEBUG [RS:2;b87883a7882d:41375 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45899:45899),(127.0.0.1/127.0.0.1:44297:44297),(127.0.0.1/127.0.0.1:34403:34403)] 2024-12-01T17:05:40,323 DEBUG [b87883a7882d:38737 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-01T17:05:40,332 DEBUG [b87883a7882d:38737 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:05:40,339 DEBUG [b87883a7882d:38737 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:05:40,339 DEBUG [b87883a7882d:38737 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:05:40,339 DEBUG [b87883a7882d:38737 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:05:40,339 DEBUG [b87883a7882d:38737 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:05:40,339 DEBUG [b87883a7882d:38737 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:05:40,339 DEBUG [b87883a7882d:38737 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:05:40,339 INFO [b87883a7882d:38737 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:05:40,339 INFO [b87883a7882d:38737 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:05:40,339 INFO [b87883a7882d:38737 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:05:40,339 DEBUG [b87883a7882d:38737 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:05:40,348 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:05:40,355 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b87883a7882d,44753,1733072737741, state=OPENING 2024-12-01T17:05:40,380 DEBUG [PEWorker-3 {}] 
zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T17:05:40,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:40,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:40,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:40,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:40,392 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T17:05:40,392 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T17:05:40,393 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T17:05:40,394 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T17:05:40,395 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T17:05:40,397 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:05:40,580 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T17:05:40,582 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:32873, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T17:05:40,598 INFO [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-01T17:05:40,599 INFO [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T17:05:40,600 INFO [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-01T17:05:40,604 INFO [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b87883a7882d%2C44753%2C1733072737741.meta, suffix=.meta, 
logDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,44753,1733072737741, archiveDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/oldWALs, maxLogs=32 2024-12-01T17:05:40,626 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,44753,1733072737741/b87883a7882d%2C44753%2C1733072737741.meta.1733072740606.meta, exclude list is [], retry=0 2024-12-01T17:05:40,635 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36379,DS-66fd9c32-3ad4-437a-8b5c-d300efc8d3c7,DISK] 2024-12-01T17:05:40,636 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44603,DS-b6e128c0-a4bb-411c-90ad-04cc93436444,DISK] 2024-12-01T17:05:40,636 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36291,DS-77e20430-c1a6-42bb-bd62-c245d594c058,DISK] 2024-12-01T17:05:40,642 INFO [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/WALs/b87883a7882d,44753,1733072737741/b87883a7882d%2C44753%2C1733072737741.meta.1733072740606.meta 2024-12-01T17:05:40,644 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45899:45899),(127.0.0.1/127.0.0.1:34403:34403),(127.0.0.1/127.0.0.1:44297:44297)] 2024-12-01T17:05:40,644 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T17:05:40,645 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-01T17:05:40,647 INFO [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:05:40,648 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T17:05:40,651 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T17:05:40,654 INFO [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-01T17:05:40,665 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T17:05:40,667 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:40,667 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-01T17:05:40,667 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-01T17:05:40,678 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T17:05:40,685 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T17:05:40,685 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:40,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T17:05:40,687 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T17:05:40,688 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T17:05:40,689 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:40,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T17:05:40,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T17:05:40,692 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T17:05:40,692 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:40,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T17:05:40,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T17:05:40,701 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T17:05:40,701 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:40,704 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T17:05:40,708 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T17:05:40,713 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740 2024-12-01T17:05:40,718 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740 2024-12-01T17:05:40,728 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T17:05:40,729 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T17:05:40,732 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-01T17:05:40,742 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T17:05:40,745 INFO [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66301216, jitterRate=-0.012034893035888672}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T17:05:40,746 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-01T17:05:40,752 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733072740668Writing region info on filesystem at 1733072740668Initializing all the Stores at 1733072740672 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733072740672Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733072740676 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072740676Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733072740676Cleaning up temporary data from old regions at 1733072740729 (+53 ms)Running coprocessor post-open hooks at 1733072740746 (+17 ms)Region opened successfully at 1733072740752 (+6 ms) 2024-12-01T17:05:40,795 INFO [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733072740570 2024-12-01T17:05:40,816 DEBUG [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T17:05:40,817 INFO [RS_OPEN_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-01T17:05:40,820 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:05:40,823 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b87883a7882d,44753,1733072737741, state=OPEN 2024-12-01T17:05:40,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T17:05:40,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T17:05:40,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T17:05:40,875 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T17:05:40,875 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T17:05:40,875 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T17:05:40,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T17:05:40,876 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b87883a7882d,44753,1733072737741 2024-12-01T17:05:40,876 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T17:05:40,885 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T17:05:40,886 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b87883a7882d,44753,1733072737741 in 480 msec 2024-12-01T17:05:40,900 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T17:05:40,900 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 983 msec 2024-12-01T17:05:40,903 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T17:05:40,903 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-01T17:05:40,930 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:05:40,931 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:05:40,981 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:40,985 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46377, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:05:41,019 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3590 sec 2024-12-01T17:05:41,019 INFO [master/b87883a7882d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733072741019, completionTime=-1 2024-12-01T17:05:41,023 INFO [master/b87883a7882d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-01T17:05:41,023 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-01T17:05:41,076 INFO [master/b87883a7882d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-01T17:05:41,076 INFO [master/b87883a7882d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733072801076 2024-12-01T17:05:41,076 INFO [master/b87883a7882d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733072861076 2024-12-01T17:05:41,076 INFO [master/b87883a7882d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 52 msec 2024-12-01T17:05:41,080 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:05:41,088 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b87883a7882d,38737,1733072736902-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:41,088 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b87883a7882d,38737,1733072736902-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:41,088 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b87883a7882d,38737,1733072736902-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:41,090 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b87883a7882d:38737, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:41,090 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:41,096 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-01T17:05:41,100 DEBUG [master/b87883a7882d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-01T17:05:41,133 INFO [master/b87883a7882d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.035sec 2024-12-01T17:05:41,135 INFO [master/b87883a7882d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T17:05:41,137 INFO [master/b87883a7882d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T17:05:41,138 INFO [master/b87883a7882d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T17:05:41,139 INFO [master/b87883a7882d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-01T17:05:41,139 INFO [master/b87883a7882d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T17:05:41,141 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b87883a7882d,38737,1733072736902-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T17:05:41,142 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b87883a7882d,38737,1733072736902-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T17:05:41,175 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-01T17:05:41,176 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is b87883a7882d,38737,1733072736902 2024-12-01T17:05:41,180 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1a6ce77e 2024-12-01T17:05:41,182 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T17:05:41,184 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50961, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T17:05:41,190 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T17:05:41,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-01T17:05:41,206 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:05:41,207 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:41,208 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-01T17:05:41,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T17:05:41,217 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:05:41,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18f9bb1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:41,236 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-01T17:05:41,236 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-01T17:05:41,240 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:05:41,243 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:05:41,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741837_1013 (size=349) 2024-12-01T17:05:41,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741837_1013 (size=349) 2024-12-01T17:05:41,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741837_1013 (size=349) 2024-12-01T17:05:41,270 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:05:41,271 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5b10cafeb6a61c25ce1e97f8b1c04b89, NAME => 'hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:05:41,290 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:05:41,290 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:05:41,291 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17450dc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:41,291 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:05:41,294 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:05:41,303 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40560, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:05:41,303 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:41,306 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24271697, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:41,307 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:05:41,317 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:05:41,318 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:41,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T17:05:41,340 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47252, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:05:41,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b87883a7882d,38737,1733072736902 2024-12-01T17:05:41,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 
2024-12-01T17:05:41,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/test.cache.data in system properties and HBase conf 2024-12-01T17:05:41,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T17:05:41,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir in system properties and HBase conf 2024-12-01T17:05:41,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T17:05:41,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T17:05:41,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-01T17:05:41,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T17:05:41,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T17:05:41,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T17:05:41,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T17:05:41,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T17:05:41,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T17:05:41,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T17:05:41,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T17:05:41,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T17:05:41,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/nfs.dump.dir in system properties and HBase conf 2024-12-01T17:05:41,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/java.io.tmpdir in system properties and HBase conf 2024-12-01T17:05:41,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T17:05:41,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T17:05:41,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T17:05:41,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741838_1014 (size=36) 2024-12-01T17:05:41,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741838_1014 (size=36) 2024-12-01T17:05:41,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741838_1014 (size=36) 2024-12-01T17:05:41,387 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 
; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:41,389 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 5b10cafeb6a61c25ce1e97f8b1c04b89, disabling compactions & flushes 2024-12-01T17:05:41,390 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. 2024-12-01T17:05:41,390 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. 2024-12-01T17:05:41,390 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. after waiting 0 ms 2024-12-01T17:05:41,390 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. 2024-12-01T17:05:41,390 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. 2024-12-01T17:05:41,390 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5b10cafeb6a61c25ce1e97f8b1c04b89: Waiting for close lock at 1733072741387Disabling compacts and flushes for region at 1733072741387Disabling writes for close at 1733072741390 (+3 ms)Writing region close event to WAL at 1733072741390Closed at 1733072741390 2024-12-01T17:05:41,395 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:05:41,402 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733072741396"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072741396"}]},"ts":"1733072741396"} 2024-12-01T17:05:41,413 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-01T17:05:41,420 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:05:41,424 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072741421"}]},"ts":"1733072741421"} 2024-12-01T17:05:41,440 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-01T17:05:41,441 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:05:41,444 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:05:41,444 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:05:41,444 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:05:41,444 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:05:41,444 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:05:41,444 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:05:41,444 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:05:41,444 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:05:41,444 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:05:41,444 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:05:41,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=5b10cafeb6a61c25ce1e97f8b1c04b89, ASSIGN}] 2024-12-01T17:05:41,450 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=5b10cafeb6a61c25ce1e97f8b1c04b89, ASSIGN 2024-12-01T17:05:41,453 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=5b10cafeb6a61c25ce1e97f8b1c04b89, ASSIGN; state=OFFLINE, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:05:41,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741839_1015 (size=592039) 2024-12-01T17:05:41,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741839_1015 (size=592039) 2024-12-01T17:05:41,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741839_1015 (size=592039) 2024-12-01T17:05:41,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T17:05:41,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36379 is added to blk_1073741840_1016 (size=1663647) 2024-12-01T17:05:41,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741840_1016 (size=1663647) 2024-12-01T17:05:41,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741840_1016 (size=1663647) 2024-12-01T17:05:41,608 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-01T17:05:41,609 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5b10cafeb6a61c25ce1e97f8b1c04b89, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:05:41,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=5b10cafeb6a61c25ce1e97f8b1c04b89, ASSIGN because future has completed 2024-12-01T17:05:41,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5b10cafeb6a61c25ce1e97f8b1c04b89, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:05:41,819 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T17:05:41,845 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50843, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T17:05:41,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T17:05:41,866 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. 2024-12-01T17:05:41,867 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5b10cafeb6a61c25ce1e97f8b1c04b89, NAME => 'hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89.', STARTKEY => '', ENDKEY => ''} 2024-12-01T17:05:41,867 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. service=AccessControlService 2024-12-01T17:05:41,868 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:05:41,868 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 5b10cafeb6a61c25ce1e97f8b1c04b89 2024-12-01T17:05:41,868 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:41,868 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5b10cafeb6a61c25ce1e97f8b1c04b89 2024-12-01T17:05:41,868 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5b10cafeb6a61c25ce1e97f8b1c04b89 2024-12-01T17:05:41,872 INFO [StoreOpener-5b10cafeb6a61c25ce1e97f8b1c04b89-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 5b10cafeb6a61c25ce1e97f8b1c04b89 2024-12-01T17:05:41,874 INFO [StoreOpener-5b10cafeb6a61c25ce1e97f8b1c04b89-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5b10cafeb6a61c25ce1e97f8b1c04b89 columnFamilyName l 2024-12-01T17:05:41,874 DEBUG [StoreOpener-5b10cafeb6a61c25ce1e97f8b1c04b89-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:41,876 INFO [StoreOpener-5b10cafeb6a61c25ce1e97f8b1c04b89-1 {}] regionserver.HStore(327): Store=5b10cafeb6a61c25ce1e97f8b1c04b89/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:05:41,876 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5b10cafeb6a61c25ce1e97f8b1c04b89 2024-12-01T17:05:41,878 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/acl/5b10cafeb6a61c25ce1e97f8b1c04b89 2024-12-01T17:05:41,880 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/acl/5b10cafeb6a61c25ce1e97f8b1c04b89 2024-12-01T17:05:41,881 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5b10cafeb6a61c25ce1e97f8b1c04b89 2024-12-01T17:05:41,881 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5b10cafeb6a61c25ce1e97f8b1c04b89 2024-12-01T17:05:41,885 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5b10cafeb6a61c25ce1e97f8b1c04b89 2024-12-01T17:05:41,894 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/acl/5b10cafeb6a61c25ce1e97f8b1c04b89/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:05:41,895 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 5b10cafeb6a61c25ce1e97f8b1c04b89; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59333419, jitterRate=-0.11586315929889679}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:05:41,896 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5b10cafeb6a61c25ce1e97f8b1c04b89 2024-12-01T17:05:41,898 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5b10cafeb6a61c25ce1e97f8b1c04b89: Running coprocessor pre-open hook at 1733072741868Writing region info on filesystem at 1733072741869 (+1 ms)Initializing all the Stores at 1733072741870 (+1 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733072741870Cleaning up temporary data from old regions at 1733072741881 (+11 ms)Running coprocessor post-open hooks at 1733072741896 (+15 ms)Region opened successfully at 1733072741898 (+2 ms) 2024-12-01T17:05:41,900 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., pid=6, masterSystemTime=1733072741814 2024-12-01T17:05:41,905 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. 2024-12-01T17:05:41,906 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. 
2024-12-01T17:05:41,909 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5b10cafeb6a61c25ce1e97f8b1c04b89, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:05:41,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5b10cafeb6a61c25ce1e97f8b1c04b89, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:05:41,936 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T17:05:41,936 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5b10cafeb6a61c25ce1e97f8b1c04b89, server=b87883a7882d,36521,1733072737896 in 308 msec 2024-12-01T17:05:41,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T17:05:41,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=5b10cafeb6a61c25ce1e97f8b1c04b89, ASSIGN in 490 msec 2024-12-01T17:05:41,946 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:05:41,946 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072741946"}]},"ts":"1733072741946"} 2024-12-01T17:05:41,952 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-01T17:05:41,955 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:05:41,960 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 760 msec 2024-12-01T17:05:42,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T17:05:42,365 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-12-01T17:05:42,374 DEBUG [master/b87883a7882d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-01T17:05:42,375 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T17:05:42,376 INFO [master/b87883a7882d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b87883a7882d,38737,1733072736902-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T17:05:43,218 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T17:05:43,344 WARN [Thread-385 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T17:05:43,608 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-01T17:05:43,608 INFO [Thread-385 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T17:05:43,609 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T17:05:43,621 INFO [Thread-385 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T17:05:43,622 INFO [Thread-385 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T17:05:43,622 INFO [Thread-385 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T17:05:43,624 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T17:05:43,625 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T17:05:43,625 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T17:05:43,625 INFO [Thread-385 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e01e198{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,AVAILABLE} 2024-12-01T17:05:43,625 INFO [Thread-385 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d805368{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-01T17:05:43,627 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T17:05:43,638 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@634dba7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,AVAILABLE} 2024-12-01T17:05:43,638 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4aaae963{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-01T17:05:43,826 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-01T17:05:43,827 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-01T17:05:43,827 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-01T17:05:43,830 INFO [Thread-385 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-01T17:05:43,912 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-01T17:05:44,336 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-01T17:05:44,763 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-01T17:05:44,795 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3523d319{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/java.io.tmpdir/jetty-localhost-41481-hadoop-yarn-common-3_4_1_jar-_-any-7834674485654572049/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-01T17:05:44,795 INFO [Thread-385 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e9879ba{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/java.io.tmpdir/jetty-localhost-32927-hadoop-yarn-common-3_4_1_jar-_-any-737682829151672820/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-01T17:05:44,796 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7872cd00{HTTP/1.1, (http/1.1)}{localhost:41481} 2024-12-01T17:05:44,796 INFO [Time-limited test {}] server.Server(415): Started @17212ms 2024-12-01T17:05:44,796 INFO [Thread-385 {}] 
server.AbstractConnector(333): Started ServerConnector@5dbf86c3{HTTP/1.1, (http/1.1)}{localhost:32927} 2024-12-01T17:05:44,796 INFO [Thread-385 {}] server.Server(415): Started @17212ms 2024-12-01T17:05:44,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741841_1017 (size=5) 2024-12-01T17:05:44,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741841_1017 (size=5) 2024-12-01T17:05:44,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741841_1017 (size=5) 2024-12-01T17:05:45,924 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-01T17:05:45,931 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T17:05:45,976 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-01T17:05:45,977 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T17:05:45,980 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T17:05:45,980 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T17:05:45,980 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T17:05:45,981 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T17:05:45,981 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@181c5184{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,AVAILABLE} 2024-12-01T17:05:45,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e64c225{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-01T17:05:46,055 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-01T17:05:46,055 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-01T17:05:46,056 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-01T17:05:46,056 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-01T17:05:46,068 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-01T17:05:46,094 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-01T17:05:46,195 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:05:46,250 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-01T17:05:46,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b3cec68{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/java.io.tmpdir/jetty-localhost-33379-hadoop-yarn-common-3_4_1_jar-_-any-12013723617053626457/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-01T17:05:46,267 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d80ab35{HTTP/1.1, (http/1.1)}{localhost:33379} 2024-12-01T17:05:46,267 INFO [Time-limited test {}] server.Server(415): Started @18682ms 2024-12-01T17:05:46,324 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-01T17:05:46,326 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-01T17:05:46,485 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log 
Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-01T17:05:46,488 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T17:05:46,503 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-01T17:05:46,504 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T17:05:46,505 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T17:05:46,505 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T17:05:46,505 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T17:05:46,506 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T17:05:46,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c37be72{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,AVAILABLE} 2024-12-01T17:05:46,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b3a1d64{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-01T17:05:46,555 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-01T17:05:46,555 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-01T17:05:46,555 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-01T17:05:46,555 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-01T17:05:46,567 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-01T17:05:46,574 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-01T17:05:46,703 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the 
scope "Singleton" 2024-12-01T17:05:46,711 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@644929b8{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/java.io.tmpdir/jetty-localhost-34703-hadoop-yarn-common-3_4_1_jar-_-any-4248515158421140894/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-01T17:05:46,712 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@331c97f2{HTTP/1.1, (http/1.1)}{localhost:34703} 2024-12-01T17:05:46,712 INFO [Time-limited test {}] server.Server(415): Started @19128ms 2024-12-01T17:05:46,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-01T17:05:46,750 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:05:46,780 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=723, OpenFileDescriptor=782, MaxFileDescriptor=1048576, SystemLoadAverage=286, ProcessCount=11, AvailableMemoryMB=8076 2024-12-01T17:05:46,783 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=723 is superior to 500 2024-12-01T17:05:46,788 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-01T17:05:46,794 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is b87883a7882d,38737,1733072736902 2024-12-01T17:05:46,794 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@426e525b 2024-12-01T17:05:46,794 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T17:05:46,797 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40712, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T17:05:46,798 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:05:46,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:46,803 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:05:46,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(787): 
Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-01T17:05:46,805 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:46,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T17:05:46,808 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:05:46,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741842_1018 (size=422) 2024-12-01T17:05:46,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741842_1018 (size=422) 2024-12-01T17:05:46,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741842_1018 (size=422) 2024-12-01T17:05:46,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T17:05:47,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T17:05:47,229 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 12611dd65a0f3153fc731cd592206f66, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:05:47,229 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1cb3ab47122820b9c6978423317f0fa3, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:05:47,251 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741844_1020 (size=83) 2024-12-01T17:05:47,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741844_1020 (size=83) 2024-12-01T17:05:47,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741844_1020 (size=83) 2024-12-01T17:05:47,253 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:47,253 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 1cb3ab47122820b9c6978423317f0fa3, disabling compactions & flushes 2024-12-01T17:05:47,253 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 2024-12-01T17:05:47,253 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 2024-12-01T17:05:47,253 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. after waiting 0 ms 2024-12-01T17:05:47,253 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 2024-12-01T17:05:47,253 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 
2024-12-01T17:05:47,254 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 1cb3ab47122820b9c6978423317f0fa3: Waiting for close lock at 1733072747253Disabling compacts and flushes for region at 1733072747253Disabling writes for close at 1733072747253Writing region close event to WAL at 1733072747253Closed at 1733072747253 2024-12-01T17:05:47,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741843_1019 (size=83) 2024-12-01T17:05:47,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741843_1019 (size=83) 2024-12-01T17:05:47,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741843_1019 (size=83) 2024-12-01T17:05:47,259 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:47,259 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 12611dd65a0f3153fc731cd592206f66, disabling compactions & flushes 2024-12-01T17:05:47,260 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:05:47,260 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:05:47,260 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. after waiting 0 ms 2024-12-01T17:05:47,260 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:05:47,260 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 
2024-12-01T17:05:47,260 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 12611dd65a0f3153fc731cd592206f66: Waiting for close lock at 1733072747259Disabling compacts and flushes for region at 1733072747259Disabling writes for close at 1733072747260 (+1 ms)Writing region close event to WAL at 1733072747260Closed at 1733072747260 2024-12-01T17:05:47,263 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:05:47,264 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733072747263"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072747263"}]},"ts":"1733072747263"} 2024-12-01T17:05:47,264 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733072747263"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072747263"}]},"ts":"1733072747263"} 2024-12-01T17:05:47,301 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-01T17:05:47,303 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:05:47,304 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072747303"}]},"ts":"1733072747303"} 2024-12-01T17:05:47,307 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-01T17:05:47,307 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:05:47,310 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:05:47,310 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:05:47,310 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:05:47,310 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:05:47,310 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:05:47,310 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:05:47,310 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:05:47,310 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:05:47,310 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:05:47,310 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:05:47,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=12611dd65a0f3153fc731cd592206f66, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1cb3ab47122820b9c6978423317f0fa3, ASSIGN}] 2024-12-01T17:05:47,313 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1cb3ab47122820b9c6978423317f0fa3, ASSIGN 2024-12-01T17:05:47,313 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=12611dd65a0f3153fc731cd592206f66, ASSIGN 2024-12-01T17:05:47,315 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=12611dd65a0f3153fc731cd592206f66, ASSIGN; state=OFFLINE, location=b87883a7882d,41375,1733072737967; forceNewPlan=false, retain=false 2024-12-01T17:05:47,315 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1cb3ab47122820b9c6978423317f0fa3, ASSIGN; state=OFFLINE, location=b87883a7882d,44753,1733072737741; forceNewPlan=false, retain=false 2024-12-01T17:05:47,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:05:47,331 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-01T17:05:47,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-01T17:05:47,331 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-01T17:05:47,333 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-01T17:05:47,333 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-01T17:05:47,333 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:05:47,334 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-01T17:05:47,334 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-01T17:05:47,334 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-01T17:05:47,334 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:05:47,334 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-01T17:05:47,335 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T17:05:47,335 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-01T17:05:47,335 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-01T17:05:47,335 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-01T17:05:47,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T17:05:47,465 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
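Note: the create request logged at 17:05:46,798 ("create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', ...}"), together with the two RegionOpenAndInit entries (STARTKEY '' / ENDKEY '1' and STARTKEY '1' / ENDKEY ''), corresponds to a two-region table pre-split at '1'. A minimal client-side sketch of an equivalent request against the HBase Java client follows; the class name and connection setup are illustrative assumptions, and the test itself issues this request through its own utilities rather than code like this.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSplitTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Mirrors the logged descriptor: one 'cf' family, VERSIONS=1, BLOOMFILTER=ROW,
          // REGION_REPLICATION=1; other attributes are left at their defaults.
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.ROW)
                  .build())
              .build();
          // One split key '1' yields the two regions seen in the RegionOpenAndInit entries.
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }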
2024-12-01T17:05:47,466 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=12611dd65a0f3153fc731cd592206f66, regionState=OPENING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:05:47,466 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=1cb3ab47122820b9c6978423317f0fa3, regionState=OPENING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:05:47,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1cb3ab47122820b9c6978423317f0fa3, ASSIGN because future has completed 2024-12-01T17:05:47,471 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1cb3ab47122820b9c6978423317f0fa3, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:05:47,473 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=12611dd65a0f3153fc731cd592206f66, ASSIGN because future has completed 2024-12-01T17:05:47,474 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 12611dd65a0f3153fc731cd592206f66, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:05:47,628 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T17:05:47,644 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36311, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T17:05:47,647 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 2024-12-01T17:05:47,647 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 1cb3ab47122820b9c6978423317f0fa3, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T17:05:47,647 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. service=AccessControlService 2024-12-01T17:05:47,648 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:05:47,648 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:47,648 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:47,648 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:47,648 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:47,650 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:05:47,650 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 12611dd65a0f3153fc731cd592206f66, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:05:47,650 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. service=AccessControlService 2024-12-01T17:05:47,651 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
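Note: the "Registered coprocessor service: ... service=AccessControlService" and "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911" entries show the ACL coprocessor attaching to each newly opened region; 536870911 is Integer.MAX_VALUE / 4, i.e. the standard Coprocessor.PRIORITY_SYSTEM value. In a secure deployment the AccessController is normally wired in through configuration along the lines of the sketch below; the exact configuration this test harness uses is not visible in the log, so treat the snippet as illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SecureConfSketch {
      // Illustrative only; the secure test harness sets its own values.
      static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.security.authorization", true);
        String acl = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.set("hbase.coprocessor.master.classes", acl);
        conf.set("hbase.coprocessor.region.classes", acl);
        conf.set("hbase.coprocessor.regionserver.classes", acl);
        return conf;
      }
    }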
2024-12-01T17:05:47,651 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:47,651 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:47,651 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:47,651 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:47,653 INFO [StoreOpener-1cb3ab47122820b9c6978423317f0fa3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:47,653 INFO [StoreOpener-12611dd65a0f3153fc731cd592206f66-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:47,655 INFO [StoreOpener-12611dd65a0f3153fc731cd592206f66-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 12611dd65a0f3153fc731cd592206f66 columnFamilyName cf 2024-12-01T17:05:47,655 INFO [StoreOpener-1cb3ab47122820b9c6978423317f0fa3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1cb3ab47122820b9c6978423317f0fa3 columnFamilyName cf 2024-12-01T17:05:47,655 DEBUG [StoreOpener-12611dd65a0f3153fc731cd592206f66-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:47,656 DEBUG [StoreOpener-1cb3ab47122820b9c6978423317f0fa3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:47,656 INFO [StoreOpener-12611dd65a0f3153fc731cd592206f66-1 {}] regionserver.HStore(327): Store=12611dd65a0f3153fc731cd592206f66/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:05:47,656 INFO [StoreOpener-1cb3ab47122820b9c6978423317f0fa3-1 {}] regionserver.HStore(327): Store=1cb3ab47122820b9c6978423317f0fa3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:05:47,656 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:47,656 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:47,658 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:47,658 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:47,658 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:47,658 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:47,659 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:47,659 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:47,659 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:47,659 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 12611dd65a0f3153fc731cd592206f66 
2024-12-01T17:05:47,661 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:47,661 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:47,664 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:05:47,664 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:05:47,665 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 12611dd65a0f3153fc731cd592206f66; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75281772, jitterRate=0.12178581953048706}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:05:47,665 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 1cb3ab47122820b9c6978423317f0fa3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65205255, jitterRate=-0.028365984559059143}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:05:47,665 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:47,665 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:47,667 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 12611dd65a0f3153fc731cd592206f66: Running coprocessor pre-open hook at 1733072747651Writing region info on filesystem at 1733072747651Initializing all the Stores at 1733072747653 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072747653Cleaning up temporary data from old regions at 1733072747659 (+6 ms)Running coprocessor post-open hooks at 1733072747666 (+7 ms)Region opened successfully at 1733072747667 (+1 ms) 2024-12-01T17:05:47,667 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 1cb3ab47122820b9c6978423317f0fa3: Running 
coprocessor pre-open hook at 1733072747648Writing region info on filesystem at 1733072747649 (+1 ms)Initializing all the Stores at 1733072747651 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072747651Cleaning up temporary data from old regions at 1733072747659 (+8 ms)Running coprocessor post-open hooks at 1733072747665 (+6 ms)Region opened successfully at 1733072747667 (+2 ms) 2024-12-01T17:05:47,669 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66., pid=11, masterSystemTime=1733072747628 2024-12-01T17:05:47,669 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3., pid=10, masterSystemTime=1733072747626 2024-12-01T17:05:47,673 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 2024-12-01T17:05:47,673 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 2024-12-01T17:05:47,675 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=1cb3ab47122820b9c6978423317f0fa3, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:05:47,675 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:05:47,675 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 
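Note: across the three region opens in this section the logged "next sequenceid" follows directly from the seqid bookkeeping just before it. WALSplitUtil writes recovered.edits/1.seqid with newMaxSeqId=1 (maxSeqId=-1, since these are brand-new regions with no store files and no recovered edits), and each region then opens with next sequenceid = newMaxSeqId + 1 = 2.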
2024-12-01T17:05:47,677 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=12611dd65a0f3153fc731cd592206f66, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:05:47,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1cb3ab47122820b9c6978423317f0fa3, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:05:47,681 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 12611dd65a0f3153fc731cd592206f66, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:05:47,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-01T17:05:47,686 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 1cb3ab47122820b9c6978423317f0fa3, server=b87883a7882d,44753,1733072737741 in 210 msec 2024-12-01T17:05:47,688 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-12-01T17:05:47,688 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 12611dd65a0f3153fc731cd592206f66, server=b87883a7882d,41375,1733072737967 in 211 msec 2024-12-01T17:05:47,688 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1cb3ab47122820b9c6978423317f0fa3, ASSIGN in 375 msec 2024-12-01T17:05:47,693 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-01T17:05:47,693 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=12611dd65a0f3153fc731cd592206f66, ASSIGN in 378 msec 2024-12-01T17:05:47,695 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:05:47,695 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072747695"}]},"ts":"1733072747695"} 2024-12-01T17:05:47,699 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-01T17:05:47,701 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:05:47,705 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-01T17:05:47,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:05:47,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:47,720 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42021, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:05:47,724 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:05:47,724 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:05:47,725 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:47,726 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34603, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-01T17:05:47,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:05:47,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:47,730 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54631, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-01T17:05:47,733 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-01T17:05:47,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-01T17:05:47,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-01T17:05:47,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-01T17:05:47,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 
2024-12-01T17:05:47,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:47,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:47,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:47,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:05:47,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:47,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:47,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:47,822 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:05:47,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:05:47,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:05:47,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:05:47,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 1.0240 sec 2024-12-01T17:05:47,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=7 2024-12-01T17:05:47,941 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-01T17:05:47,941 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSplitRegion get assigned. Timeout = 60000ms 2024-12-01T17:05:47,942 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:05:47,948 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned to meta. Checking AM states. 2024-12-01T17:05:47,949 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:05:47,949 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned. 2024-12-01T17:05:47,953 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-01T17:05:47,968 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-01T17:05:47,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072747968 (current time:1733072747968). 
2024-12-01T17:05:47,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:05:47,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-01T17:05:47,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:05:47,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17d28f47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:47,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:05:47,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:05:47,972 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:05:47,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:05:47,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:05:47,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76abe215, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:47,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:05:47,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:05:47,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:47,974 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40730, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:05:47,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ec7502b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:47,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:05:47,978 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:05:47,978 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:47,979 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53094, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:05:47,982 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 2024-12-01T17:05:47,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:05:47,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:47,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:47,988 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:05:47,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2817cfc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:47,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:05:47,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:05:47,991 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:05:47,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:05:47,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:05:47,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64b6371f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:47,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:05:47,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:05:47,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:47,993 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40750, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:05:47,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c23717f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:47,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:05:47,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:05:47,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:47,998 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53106, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-01T17:05:48,001 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:05:48,002 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:48,003 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43162, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:05:48,006 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 2024-12-01T17:05:48,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:05:48,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:48,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:48,007 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:05:48,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-01T17:05:48,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:05:48,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-01T17:05:48,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-01T17:05:48,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-01T17:05:48,021 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:05:48,028 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:05:48,043 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:05:48,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741845_1021 (size=215) 2024-12-01T17:05:48,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741845_1021 (size=215) 2024-12-01T17:05:48,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741845_1021 (size=215) 2024-12-01T17:05:48,058 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:05:48,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12611dd65a0f3153fc731cd592206f66}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1cb3ab47122820b9c6978423317f0fa3}] 2024-12-01T17:05:48,065 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:48,065 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:48,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-01T17:05:48,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41375 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-01T17:05:48,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-01T17:05:48,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 2024-12-01T17:05:48,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:05:48,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 1cb3ab47122820b9c6978423317f0fa3: 2024-12-01T17:05:48,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 12611dd65a0f3153fc731cd592206f66: 2024-12-01T17:05:48,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-01T17:05:48,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 
2024-12-01T17:05:48,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:48,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:48,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:05:48,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:05:48,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:05:48,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:05:48,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741846_1022 (size=86) 2024-12-01T17:05:48,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741847_1023 (size=86) 2024-12-01T17:05:48,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741847_1023 (size=86) 2024-12-01T17:05:48,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741846_1022 (size=86) 2024-12-01T17:05:48,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741846_1022 (size=86) 2024-12-01T17:05:48,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741847_1023 (size=86) 2024-12-01T17:05:48,252 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:05:48,253 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 
2024-12-01T17:05:48,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-01T17:05:48,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-01T17:05:48,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-01T17:05:48,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-01T17:05:48,257 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:48,257 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:48,257 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:48,257 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:48,262 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1cb3ab47122820b9c6978423317f0fa3 in 199 msec 2024-12-01T17:05:48,265 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-12-01T17:05:48,265 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 12611dd65a0f3153fc731cd592206f66 in 199 msec 2024-12-01T17:05:48,265 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:05:48,268 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:05:48,271 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:05:48,272 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 
2024-12-01T17:05:48,275 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:48,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741848_1024 (size=597) 2024-12-01T17:05:48,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741848_1024 (size=597) 2024-12-01T17:05:48,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741848_1024 (size=597) 2024-12-01T17:05:48,308 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:05:48,330 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:05:48,331 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:48,337 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:05:48,337 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-01T17:05:48,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-01T17:05:48,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 322 msec 2024-12-01T17:05:48,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-01T17:05:48,651 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: 
default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-01T17:05:48,665 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='0479cff7a56f133561104efb7eb29986c', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:05:48,669 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='123dfb6489b404d272194729699131974', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:05:48,672 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='212cc9449e6d7dc8463679ca42b78294b', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:05:48,674 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='3605404cd99922fc30145236261486599', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:05:48,676 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='4f38440c1d16f5fd4a25abf76ca3fb5f6', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:05:48,677 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:48,677 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='6f8cc5ab74ddfb70e2d5b4d0e908c2329', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:05:48,679 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='548a6747cc380a0c2f2f45a019e7642f6', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:05:48,680 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='3f0c88f506eaf1b168c3af11c3661219', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:05:48,682 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The 
fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='83362f1511b41800be637eb40cdf05ce', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:05:48,682 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41934, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:05:48,684 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:05:48,689 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44753 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:05:48,694 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-01T17:05:48,699 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:48,700 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:05:48,701 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:05:48,705 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-01T17:05:48,726 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-01T17:05:48,740 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-01T17:05:48,747 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-01T17:05:48,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072748747 (current time:1733072748747). 
2024-12-01T17:05:48,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:05:48,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-01T17:05:48,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:05:48,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@701915bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:48,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:05:48,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:05:48,750 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:05:48,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:05:48,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:05:48,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a140166, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:48,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:05:48,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:05:48,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:48,753 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40762, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:05:48,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26cf3b25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:48,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:05:48,757 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:05:48,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:48,759 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53118, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:05:48,761 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 2024-12-01T17:05:48,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:05:48,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:48,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:48,761 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:05:48,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@762ee350, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:48,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:05:48,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:05:48,763 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:05:48,764 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:05:48,764 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:05:48,764 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b794511, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:48,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:05:48,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:05:48,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:48,766 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40782, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:05:48,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bcaf6f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:48,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:05:48,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:05:48,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:48,772 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53126, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-01T17:05:48,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:05:48,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:48,778 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43166, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:05:48,780 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 2024-12-01T17:05:48,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:05:48,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:48,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:48,780 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:05:48,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-01T17:05:48,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:05:48,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-01T17:05:48,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-01T17:05:48,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-01T17:05:48,785 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:05:48,787 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:05:48,791 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:05:48,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741849_1025 (size=210) 2024-12-01T17:05:48,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741849_1025 (size=210) 2024-12-01T17:05:48,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741849_1025 (size=210) 2024-12-01T17:05:48,804 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:05:48,804 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12611dd65a0f3153fc731cd592206f66}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1cb3ab47122820b9c6978423317f0fa3}] 2024-12-01T17:05:48,806 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:48,806 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:48,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-01T17:05:48,958 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41375 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-01T17:05:48,958 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-01T17:05:48,959 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:05:48,959 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 
2024-12-01T17:05:48,963 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 12611dd65a0f3153fc731cd592206f66 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-01T17:05:48,963 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 1cb3ab47122820b9c6978423317f0fa3 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-01T17:05:49,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/.tmp/cf/13af3a19c8aa42c0aca603c48f410e83 is 71, key is 07ff2be27fa456305023dc96ce8a2842/cf:q/1733072748684/Put/seqid=0 2024-12-01T17:05:49,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/.tmp/cf/ae21be71d7b540b2a104ed497784474b is 71, key is 1176ddd9b5f13c7e968f3cad3eb69208/cf:q/1733072748688/Put/seqid=0 2024-12-01T17:05:49,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741851_1027 (size=8256) 2024-12-01T17:05:49,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741850_1026 (size=5356) 2024-12-01T17:05:49,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741850_1026 (size=5356) 2024-12-01T17:05:49,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741851_1027 (size=8256) 2024-12-01T17:05:49,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741851_1027 (size=8256) 2024-12-01T17:05:49,055 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/.tmp/cf/ae21be71d7b540b2a104ed497784474b 2024-12-01T17:05:49,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741850_1026 (size=5356) 2024-12-01T17:05:49,059 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/.tmp/cf/13af3a19c8aa42c0aca603c48f410e83 2024-12-01T17:05:49,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see 
if procedure is done pid=15 2024-12-01T17:05:49,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/.tmp/cf/13af3a19c8aa42c0aca603c48f410e83 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/cf/13af3a19c8aa42c0aca603c48f410e83 2024-12-01T17:05:49,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/.tmp/cf/ae21be71d7b540b2a104ed497784474b as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/cf/ae21be71d7b540b2a104ed497784474b 2024-12-01T17:05:49,161 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/cf/13af3a19c8aa42c0aca603c48f410e83, entries=4, sequenceid=6, filesize=5.2 K 2024-12-01T17:05:49,161 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/cf/ae21be71d7b540b2a104ed497784474b, entries=46, sequenceid=6, filesize=8.1 K 2024-12-01T17:05:49,171 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 12611dd65a0f3153fc731cd592206f66 in 206ms, sequenceid=6, compaction requested=false 2024-12-01T17:05:49,171 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 1cb3ab47122820b9c6978423317f0fa3 in 206ms, sequenceid=6, compaction requested=false 2024-12-01T17:05:49,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-01T17:05:49,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-01T17:05:49,173 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 1cb3ab47122820b9c6978423317f0fa3: 2024-12-01T17:05:49,173 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-01T17:05:49,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:49,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:05:49,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/cf/ae21be71d7b540b2a104ed497784474b] hfiles 2024-12-01T17:05:49,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/cf/ae21be71d7b540b2a104ed497784474b for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:49,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 12611dd65a0f3153fc731cd592206f66: 2024-12-01T17:05:49,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-01T17:05:49,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:49,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:05:49,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/cf/13af3a19c8aa42c0aca603c48f410e83] hfiles 2024-12-01T17:05:49,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/cf/13af3a19c8aa42c0aca603c48f410e83 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:49,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741852_1028 (size=125) 2024-12-01T17:05:49,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741852_1028 (size=125) 2024-12-01T17:05:49,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741852_1028 (size=125) 2024-12-01T17:05:49,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 
2024-12-01T17:05:49,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-01T17:05:49,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-01T17:05:49,250 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:49,250 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:05:49,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1cb3ab47122820b9c6978423317f0fa3 in 448 msec 2024-12-01T17:05:49,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741853_1029 (size=125) 2024-12-01T17:05:49,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741853_1029 (size=125) 2024-12-01T17:05:49,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741853_1029 (size=125) 2024-12-01T17:05:49,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 
2024-12-01T17:05:49,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-01T17:05:49,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-01T17:05:49,313 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:49,313 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:05:49,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-12-01T17:05:49,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 12611dd65a0f3153fc731cd592206f66 in 511 msec 2024-12-01T17:05:49,319 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:05:49,321 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:05:49,323 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:05:49,323 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:49,324 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:49,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741854_1030 (size=675) 2024-12-01T17:05:49,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741854_1030 (size=675) 2024-12-01T17:05:49,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741854_1030 (size=675) 2024-12-01T17:05:49,362 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:05:49,371 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:05:49,372 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:49,374 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:05:49,374 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-01T17:05:49,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 593 msec 2024-12-01T17:05:49,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-01T17:05:49,411 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-01T17:05:49,435 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T17:05:49,436 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T17:05:49,436 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T17:05:49,438 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41946, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T17:05:49,438 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43176, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T17:05:49,439 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53132, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T17:05:49,439 INFO 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41375 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-01T17:05:49,439 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44753 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-01T17:05:49,439 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-01T17:05:49,442 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:05:49,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:49,451 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:05:49,451 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:49,451 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-01T17:05:49,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-01T17:05:49,455 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:05:49,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741855_1031 (size=390) 2024-12-01T17:05:49,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741855_1031 (size=390) 2024-12-01T17:05:49,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741855_1031 (size=390) 2024-12-01T17:05:49,490 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0e879504207e174ec4d3192326092bb1, NAME => 'testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:05:49,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741856_1032 (size=75) 2024-12-01T17:05:49,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741856_1032 (size=75) 2024-12-01T17:05:49,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741856_1032 (size=75) 2024-12-01T17:05:49,539 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:49,539 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 0e879504207e174ec4d3192326092bb1, disabling compactions & flushes 2024-12-01T17:05:49,539 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. 2024-12-01T17:05:49,539 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. 2024-12-01T17:05:49,539 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. after waiting 0 ms 2024-12-01T17:05:49,539 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. 2024-12-01T17:05:49,539 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. 
2024-12-01T17:05:49,540 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0e879504207e174ec4d3192326092bb1: Waiting for close lock at 1733072749539Disabling compacts and flushes for region at 1733072749539Disabling writes for close at 1733072749539Writing region close event to WAL at 1733072749539Closed at 1733072749539 2024-12-01T17:05:49,544 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:05:49,545 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733072749544"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072749544"}]},"ts":"1733072749544"} 2024-12-01T17:05:49,551 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-01T17:05:49,554 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:05:49,556 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072749554"}]},"ts":"1733072749554"} 2024-12-01T17:05:49,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-01T17:05:49,560 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-01T17:05:49,561 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:05:49,562 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:05:49,563 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:05:49,563 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:05:49,563 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:05:49,563 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:05:49,563 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:05:49,563 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:05:49,563 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:05:49,563 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:05:49,563 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:05:49,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0e879504207e174ec4d3192326092bb1, ASSIGN}] 2024-12-01T17:05:49,566 INFO 
[PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0e879504207e174ec4d3192326092bb1, ASSIGN 2024-12-01T17:05:49,568 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0e879504207e174ec4d3192326092bb1, ASSIGN; state=OFFLINE, location=b87883a7882d,41375,1733072737967; forceNewPlan=false, retain=false 2024-12-01T17:05:49,719 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-01T17:05:49,719 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=0e879504207e174ec4d3192326092bb1, regionState=OPENING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:05:49,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0e879504207e174ec4d3192326092bb1, ASSIGN because future has completed 2024-12-01T17:05:49,727 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e879504207e174ec4d3192326092bb1, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:05:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-01T17:05:49,945 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. 2024-12-01T17:05:49,945 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 0e879504207e174ec4d3192326092bb1, NAME => 'testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1.', STARTKEY => '', ENDKEY => ''} 2024-12-01T17:05:49,945 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. service=AccessControlService 2024-12-01T17:05:49,946 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:05:49,946 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:49,946 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:49,946 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:49,947 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:49,949 INFO [StoreOpener-0e879504207e174ec4d3192326092bb1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:49,952 INFO [StoreOpener-0e879504207e174ec4d3192326092bb1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0e879504207e174ec4d3192326092bb1 columnFamilyName cf 2024-12-01T17:05:49,952 DEBUG [StoreOpener-0e879504207e174ec4d3192326092bb1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:49,954 INFO [StoreOpener-0e879504207e174ec4d3192326092bb1-1 {}] regionserver.HStore(327): Store=0e879504207e174ec4d3192326092bb1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:05:49,954 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:49,955 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:49,956 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:49,957 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:49,957 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:49,960 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:49,973 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:05:49,974 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 0e879504207e174ec4d3192326092bb1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60011301, jitterRate=-0.10576193034648895}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:05:49,974 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:49,975 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 0e879504207e174ec4d3192326092bb1: Running coprocessor pre-open hook at 1733072749947Writing region info on filesystem at 1733072749947Initializing all the Stores at 1733072749948 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072749948Cleaning up temporary data from old regions at 1733072749957 (+9 ms)Running coprocessor post-open hooks at 1733072749974 (+17 ms)Region opened successfully at 1733072749975 (+1 ms) 2024-12-01T17:05:49,976 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1., pid=20, masterSystemTime=1733072749891 2024-12-01T17:05:49,980 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. 2024-12-01T17:05:49,980 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. 
2024-12-01T17:05:49,981 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=0e879504207e174ec4d3192326092bb1, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:05:49,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e879504207e174ec4d3192326092bb1, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:05:49,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-01T17:05:49,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 0e879504207e174ec4d3192326092bb1, server=b87883a7882d,41375,1733072737967 in 259 msec 2024-12-01T17:05:49,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-01T17:05:49,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0e879504207e174ec4d3192326092bb1, ASSIGN in 428 msec 2024-12-01T17:05:49,998 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:05:49,999 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072749998"}]},"ts":"1733072749998"} 2024-12-01T17:05:50,005 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-01T17:05:50,007 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:05:50,008 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-01T17:05:50,015 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-01T17:05:50,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:05:50,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:05:50,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:05:50,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, 
quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:05:50,044 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:05:50,044 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:05:50,044 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:05:50,044 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:05:50,044 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:05:50,044 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:05:50,045 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:05:50,045 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:05:50,048 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 600 msec 2024-12-01T17:05:50,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-01T17:05:50,080 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-01T17:05:50,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T17:05:50,083 INFO [Time-limited test {}] fs.HFileSystem(339): 
Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T17:05:52,328 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-01T17:05:52,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741857_1033 (size=134217728) 2024-12-01T17:05:52,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741857_1033 (size=134217728) 2024-12-01T17:05:52,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741857_1033 (size=134217728) 2024-12-01T17:05:53,152 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:05:54,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741858_1034 (size=134217728) 2024-12-01T17:05:54,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741858_1034 (size=134217728) 2024-12-01T17:05:54,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741858_1034 (size=134217728) 2024-12-01T17:05:54,974 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733072750087/Put/seqid=0 2024-12-01T17:05:54,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741859_1035 (size=51979256) 2024-12-01T17:05:54,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741859_1035 (size=51979256) 2024-12-01T17:05:54,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741859_1035 (size=51979256) 2024-12-01T17:05:54,987 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41e6e8ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:54,987 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:05:54,987 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:05:54,989 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:05:54,990 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:05:54,990 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ClusterIdFetcher$1(103): Got connection registry 
info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:05:54,991 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3196a6a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:54,991 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:05:54,991 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:05:54,992 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:54,995 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54358, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:05:54,996 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72e2718c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:54,997 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:05:54,999 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:05:54,999 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:55,001 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36770, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:05:55,014 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:44451/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 
2024-12-01T17:05:55,015 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-01T17:05:55,016 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.AsyncConnectionImpl(321): The fetched master address is b87883a7882d,38737,1733072736902 2024-12-01T17:05:55,016 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5e4b99ea 2024-12-01T17:05:55,017 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T17:05:55,019 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54360, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T17:05:55,028 WARN [IPC Server handler 4 on default port 44451 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-01T17:05:55,035 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:05:55,039 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:55,042 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56698, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:05:55,048 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-01T17:05:55,073 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:44451/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-01T17:05:55,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:05:55,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:05:55,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:55,106 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42299, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-01T17:05:55,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44753 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at 
org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-01T17:05:55,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44753 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.3:42299 deadline: 1733072815106, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-01T17:05:55,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T17:05:55,136 WARN [IPC Server handler 4 on default port 44451 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-01T17:05:55,170 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:44451/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/output/cf/test_file for inclusion in 0e879504207e174ec4d3192326092bb1/cf 2024-12-01T17:05:55,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-01T17:05:55,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-01T17:05:55,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:44451/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-01T17:05:55,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HRegion(2603): Flush status journal for 0e879504207e174ec4d3192326092bb1: 2024-12-01T17:05:55,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:44451/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/output/cf/test_file to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/staging/jenkins__testExportFileSystemStateWithSplitRegion__gr155kdt4k2fgphi6p8ielfei3jdouas479g1vbolrvc529dmh52tip1bpnqtr8p/cf/test_file 2024-12-01T17:05:55,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/staging/jenkins__testExportFileSystemStateWithSplitRegion__gr155kdt4k2fgphi6p8ielfei3jdouas479g1vbolrvc529dmh52tip1bpnqtr8p/cf/test_file as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_ 2024-12-01T17:05:55,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/staging/jenkins__testExportFileSystemStateWithSplitRegion__gr155kdt4k2fgphi6p8ielfei3jdouas479g1vbolrvc529dmh52tip1bpnqtr8p/cf/test_file into 0e879504207e174ec4d3192326092bb1/cf as 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_ - updating store file list. 2024-12-01T17:05:55,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HStoreFile(483): HFile Bloom filter type for fd41708961b547f08dc46c317e0e27c7_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-01T17:05:55,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_ into 0e879504207e174ec4d3192326092bb1/cf 2024-12-01T17:05:55,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/staging/jenkins__testExportFileSystemStateWithSplitRegion__gr155kdt4k2fgphi6p8ielfei3jdouas479g1vbolrvc529dmh52tip1bpnqtr8p/cf/test_file into 0e879504207e174ec4d3192326092bb1/cf (new location: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_) 2024-12-01T17:05:55,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/staging/jenkins__testExportFileSystemStateWithSplitRegion__gr155kdt4k2fgphi6p8ielfei3jdouas479g1vbolrvc529dmh52tip1bpnqtr8p/cf/test_file 2024-12-01T17:05:55,226 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
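
[editor's note] Before committing the file, HStore checks that the HFile's key range fits inside the target region ("HFile bounds" vs. "Region bounds" above); only then is it moved from the staging directory into the region's cf store and added to the store file list. The snippet below is a simplified, self-contained illustration of that containment check, not HBase's actual implementation; the byte values are taken from the log, and empty region boundaries mean "unbounded".

import org.apache.hadoop.hbase.util.Bytes;

public class HFileFitsRegion {
  // An HFile fits a region when its first key is >= the region start key and its
  // last key is < the region end key; empty boundaries mean the region is unbounded on that side.
  static boolean fits(byte[] first, byte[] last, byte[] regionStart, byte[] regionEnd) {
    boolean aboveStart = regionStart.length == 0 || Bytes.compareTo(first, regionStart) >= 0;
    boolean belowEnd   = regionEnd.length == 0   || Bytes.compareTo(last, regionEnd) < 0;
    return aboveStart && belowEnd;
  }

  public static void main(String[] args) {
    // Values from the log: first=1\x00\x00\x00, last=9\x00\x00\x00, region bounds are both empty.
    byte[] first = new byte[] {'1', 0, 0, 0};
    byte[] last  = new byte[] {'9', 0, 0, 0};
    System.out.println(fits(first, last, new byte[0], new byte[0])); // true
  }
}
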
2024-12-01T17:05:55,226 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T17:05:55,227 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:55,230 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
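
[editor's note] The AsyncConnectionImpl "Call stack" entry above is the client connection logging where close() was invoked from; in this run it is BulkLoadHFilesTool.bulkLoad closing its internal connection once the load finishes. A hedged sketch of the same lifecycle in user code with the HBase 3.x async client, where try-with-resources is what ultimately triggers that logged close; the row key read here is purely illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncConnSketch {
  public static void main(String[] args) throws Exception {
    // close() runs when the try block exits, producing the kind of close call stack logged above.
    try (AsyncConnection conn =
             ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      AsyncTable<?> table = conn.getTable(TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
      table.get(new Get(Bytes.toBytes("1"))).join(); // hypothetical read; the test's row keys carry trailing \x00 bytes
    }
  }
}
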
2024-12-01T17:05:55,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:55,231 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-01T17:05:55,239 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:05:55,250 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.3 split testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. 2024-12-01T17:05:55,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=b87883a7882d,41375,1733072737967 2024-12-01T17:05:55,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=0e879504207e174ec4d3192326092bb1, daughterA=a7e20bcfa36501b077598b1fda66c308, daughterB=c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:55,266 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=0e879504207e174ec4d3192326092bb1, daughterA=a7e20bcfa36501b077598b1fda66c308, daughterB=c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:55,266 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=0e879504207e174ec4d3192326092bb1, daughterA=a7e20bcfa36501b077598b1fda66c308, daughterB=c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:55,266 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=0e879504207e174ec4d3192326092bb1, daughterA=a7e20bcfa36501b077598b1fda66c308, daughterB=c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:55,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-01T17:05:55,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0e879504207e174ec4d3192326092bb1, UNASSIGN}] 2024-12-01T17:05:55,277 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0e879504207e174ec4d3192326092bb1, UNASSIGN 2024-12-01T17:05:55,279 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=22 updating 
hbase:meta row=0e879504207e174ec4d3192326092bb1, regionState=CLOSING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:05:55,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0e879504207e174ec4d3192326092bb1, UNASSIGN because future has completed 2024-12-01T17:05:55,284 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-01T17:05:55,284 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0e879504207e174ec4d3192326092bb1, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:05:55,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-01T17:05:55,445 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:55,446 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-01T17:05:55,447 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 0e879504207e174ec4d3192326092bb1, disabling compactions & flushes 2024-12-01T17:05:55,447 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. 2024-12-01T17:05:55,447 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. 2024-12-01T17:05:55,447 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. after waiting 0 ms 2024-12-01T17:05:55,447 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. 
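
[editor's note] The HMaster "Client=jenkins//172.17.0.3 split ..." entry and SplitTableRegionProcedure pid=21 above are the server side of an explicit split request; the parent region is unassigned (regionState=CLOSING, then the close handler above) before the daughters are created, and the repeated "Checking to see if procedure is done pid=21" lines show the client polling the master for completion. A minimal sketch of the client call that would start such a procedure, assuming the HBase 3.x Admin API and the split point "5" visible in the daughter region boundaries.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to start a SplitTableRegionProcedure at the given split point.
      admin.split(TableName.valueOf("testExportFileSystemStateWithSplitRegion"), Bytes.toBytes("5"));
    }
  }
}
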
2024-12-01T17:05:55,460 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-12-01T17:05:55,465 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:05:55,465 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1. 2024-12-01T17:05:55,465 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 0e879504207e174ec4d3192326092bb1: Waiting for close lock at 1733072755447Running coprocessor pre-close hooks at 1733072755447Disabling compacts and flushes for region at 1733072755447Disabling writes for close at 1733072755447Writing region close event to WAL at 1733072755450 (+3 ms)Running coprocessor post-close hooks at 1733072755462 (+12 ms)Closed at 1733072755465 (+3 ms) 2024-12-01T17:05:55,471 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:55,472 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=0e879504207e174ec4d3192326092bb1, regionState=CLOSED 2024-12-01T17:05:55,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0e879504207e174ec4d3192326092bb1, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:05:55,482 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-01T17:05:55,482 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 0e879504207e174ec4d3192326092bb1, server=b87883a7882d,41375,1733072737967 in 194 msec 2024-12-01T17:05:55,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-12-01T17:05:55,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0e879504207e174ec4d3192326092bb1, UNASSIGN in 208 msec 2024-12-01T17:05:55,516 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:55,520 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=0e879504207e174ec4d3192326092bb1, threads=1 2024-12-01T17:05:55,526 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_ for region: 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:55,536 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for fd41708961b547f08dc46c317e0e27c7_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-01T17:05:55,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-01T17:05:55,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741860_1036 (size=21) 2024-12-01T17:05:55,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741860_1036 (size=21) 2024-12-01T17:05:55,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741860_1036 (size=21) 2024-12-01T17:05:55,608 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for fd41708961b547f08dc46c317e0e27c7_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-01T17:05:55,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741861_1037 (size=21) 2024-12-01T17:05:55,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741861_1037 (size=21) 2024-12-01T17:05:55,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741861_1037 (size=21) 2024-12-01T17:05:55,629 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_ for region: 0e879504207e174ec4d3192326092bb1 2024-12-01T17:05:55,631 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 0e879504207e174ec4d3192326092bb1 Daughter A: [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1] storefiles, Daughter B: [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1] storefiles. 
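
[editor's note] The split does not rewrite the large store file: each daughter directory receives a reference file (the fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1 entries above) that points at the bottom or top half of the parent HFile, and the data is only physically rewritten by later compactions. A sketch of the resulting key-range geometry with the public RegionInfoBuilder API, assuming the split point "5"; this is illustrative, not the procedure's internal code.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class DaughterGeometry {
  public static void main(String[] args) {
    TableName tn = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    byte[] splitPoint = Bytes.toBytes("5");
    // Daughter A covers ['', '5'), daughter B covers ['5', ''): together they tile the parent's range.
    RegionInfo daughterA = RegionInfoBuilder.newBuilder(tn)
        .setStartKey(HConstants.EMPTY_BYTE_ARRAY).setEndKey(splitPoint).build();
    RegionInfo daughterB = RegionInfoBuilder.newBuilder(tn)
        .setStartKey(splitPoint).setEndKey(HConstants.EMPTY_BYTE_ARRAY).build();
    System.out.println(daughterA + " / " + daughterB);
  }
}
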
2024-12-01T17:05:55,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741862_1038 (size=76) 2024-12-01T17:05:55,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741862_1038 (size=76) 2024-12-01T17:05:55,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741862_1038 (size=76) 2024-12-01T17:05:55,657 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:55,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741863_1039 (size=76) 2024-12-01T17:05:55,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741863_1039 (size=76) 2024-12-01T17:05:55,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741863_1039 (size=76) 2024-12-01T17:05:55,709 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:55,720 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-01T17:05:55,723 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-01T17:05:55,727 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733072755726"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733072755726"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733072755726"}]},"ts":"1733072755726"} 2024-12-01T17:05:55,727 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733072755726"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072755726"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733072755726"}]},"ts":"1733072755726"} 2024-12-01T17:05:55,728 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733072755726"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072755726"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733072755726"}]},"ts":"1733072755726"} 2024-12-01T17:05:55,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a7e20bcfa36501b077598b1fda66c308, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=c17d7ff8c5b6ed284ece92b6d32c1363, ASSIGN}] 2024-12-01T17:05:55,744 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a7e20bcfa36501b077598b1fda66c308, ASSIGN 2024-12-01T17:05:55,744 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=c17d7ff8c5b6ed284ece92b6d32c1363, ASSIGN 2024-12-01T17:05:55,745 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=c17d7ff8c5b6ed284ece92b6d32c1363, ASSIGN; state=SPLITTING_NEW, location=b87883a7882d,41375,1733072737967; forceNewPlan=false, retain=false 2024-12-01T17:05:55,745 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a7e20bcfa36501b077598b1fda66c308, ASSIGN; state=SPLITTING_NEW, location=b87883a7882d,41375,1733072737967; forceNewPlan=false, retain=false 2024-12-01T17:05:55,896 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
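
[editor's note] With the two daughters handed to the balancer ("Reassigned 2 regions" above), the split is nearly complete from a client's point of view. A hedged sketch of one way a test could wait for it to take effect using only the public Admin API, polling until the table reports at least two regions; the 100 ms interval and the termination condition are illustrative choices, not necessarily what this test does.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForSplit {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Poll until the daughters show up; whether the offline split parent is included
      // in the listing can vary by version, but either way the count rises above 1.
      while (admin.getRegions(tn).size() < 2) {
        Thread.sleep(100);
      }
    }
  }
}
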
2024-12-01T17:05:55,896 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=c17d7ff8c5b6ed284ece92b6d32c1363, regionState=OPENING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:05:55,896 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=a7e20bcfa36501b077598b1fda66c308, regionState=OPENING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:05:55,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a7e20bcfa36501b077598b1fda66c308, ASSIGN because future has completed 2024-12-01T17:05:55,900 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure a7e20bcfa36501b077598b1fda66c308, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:05:55,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-01T17:05:55,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=c17d7ff8c5b6ed284ece92b6d32c1363, ASSIGN because future has completed 2024-12-01T17:05:55,902 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure c17d7ff8c5b6ed284ece92b6d32c1363, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:05:56,059 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308. 2024-12-01T17:05:56,059 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => a7e20bcfa36501b077598b1fda66c308, NAME => 'testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308.', STARTKEY => '', ENDKEY => '5'} 2024-12-01T17:05:56,059 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308. service=AccessControlService 2024-12-01T17:05:56,060 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
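
[editor's note] The daughter opens below register the AccessControlService coprocessor endpoint because the cluster runs with the AccessController system coprocessor, which is driven by configuration rather than code in the region server. A minimal sketch of the usual keys involved; these are the standard HBase security settings, and whether this particular test sets exactly these values is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControlConf {
  public static Configuration secured() {
    Configuration conf = HBaseConfiguration.create();
    // Turn on authorization and load AccessController as a system coprocessor
    // on both the master and the region servers.
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}
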
2024-12-01T17:05:56,060 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,060 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:56,060 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,060 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,062 INFO [StoreOpener-a7e20bcfa36501b077598b1fda66c308-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,063 INFO [StoreOpener-a7e20bcfa36501b077598b1fda66c308-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a7e20bcfa36501b077598b1fda66c308 columnFamilyName cf 2024-12-01T17:05:56,063 DEBUG [StoreOpener-a7e20bcfa36501b077598b1fda66c308-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:56,075 DEBUG [StoreFileOpener-a7e20bcfa36501b077598b1fda66c308-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1: NONE, but ROW specified in column family configuration 2024-12-01T17:05:56,092 DEBUG [StoreOpener-a7e20bcfa36501b077598b1fda66c308-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1->hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_-bottom 2024-12-01T17:05:56,093 INFO [StoreOpener-a7e20bcfa36501b077598b1fda66c308-1 {}] regionserver.HStore(327): Store=a7e20bcfa36501b077598b1fda66c308/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:05:56,093 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,094 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,096 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,096 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,096 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,099 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,100 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened a7e20bcfa36501b077598b1fda66c308; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75265191, jitterRate=0.12153874337673187}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:05:56,101 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,101 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for a7e20bcfa36501b077598b1fda66c308: Running coprocessor pre-open hook at 1733072756061Writing region info on filesystem at 1733072756061Initializing all the Stores at 1733072756062 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072756062Cleaning up temporary data from old regions at 1733072756096 (+34 ms)Running coprocessor post-open hooks at 1733072756101 (+5 ms)Region opened successfully at 1733072756101 2024-12-01T17:05:56,102 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308., pid=26, masterSystemTime=1733072756054 2024-12-01T17:05:56,103 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.CompactSplit(342): 
Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308.,because compaction is disabled. 2024-12-01T17:05:56,107 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308. 2024-12-01T17:05:56,107 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308. 2024-12-01T17:05:56,107 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363. 2024-12-01T17:05:56,108 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => c17d7ff8c5b6ed284ece92b6d32c1363, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363.', STARTKEY => '5', ENDKEY => ''} 2024-12-01T17:05:56,108 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363. service=AccessControlService 2024-12-01T17:05:56,108 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
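The open journal above shows the daughter region going through the usual open sequence (instantiate the 'cf' store, replay the WAL, write the seq id, run post-open coprocessor hooks), and CompactSplit then ignores the compaction request because compaction is disabled on this table. A hedged sketch, not the test's actual setup code, of how a table with that column family and compaction disabled could be declared; the class and method names are illustrative:

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateSplitTestTable {
    // 'admin' is assumed to be an already-connected Admin instance.
    static void create(Admin admin) throws IOException {
      TableDescriptorBuilder td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
          .setCompactionEnabled(false)  // why CompactSplit logs "compaction is disabled"
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                  // VERSIONS => '1' in the journal
              .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536 B (64KB)'
              .build());
      admin.createTable(td.build());
    }
  }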
2024-12-01T17:05:56,109 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=a7e20bcfa36501b077598b1fda66c308, regionState=OPEN, openSeqNum=7, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:05:56,109 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,109 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:05:56,109 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,109 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,111 INFO [StoreOpener-c17d7ff8c5b6ed284ece92b6d32c1363-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,113 INFO [StoreOpener-c17d7ff8c5b6ed284ece92b6d32c1363-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c17d7ff8c5b6ed284ece92b6d32c1363 columnFamilyName cf 2024-12-01T17:05:56,113 DEBUG [StoreOpener-c17d7ff8c5b6ed284ece92b6d32c1363-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:56,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure a7e20bcfa36501b077598b1fda66c308, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:05:56,126 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=24 2024-12-01T17:05:56,126 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure a7e20bcfa36501b077598b1fda66c308, server=b87883a7882d,41375,1733072737967 in 220 msec 2024-12-01T17:05:56,128 DEBUG [StoreFileOpener-c17d7ff8c5b6ed284ece92b6d32c1363-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 
fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1: NONE, but ROW specified in column family configuration 2024-12-01T17:05:56,132 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a7e20bcfa36501b077598b1fda66c308, ASSIGN in 383 msec 2024-12-01T17:05:56,133 DEBUG [StoreOpener-c17d7ff8c5b6ed284ece92b6d32c1363-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1->hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_-top 2024-12-01T17:05:56,133 INFO [StoreOpener-c17d7ff8c5b6ed284ece92b6d32c1363-1 {}] regionserver.HStore(327): Store=c17d7ff8c5b6ed284ece92b6d32c1363/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:05:56,133 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,134 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,136 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,136 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,137 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,139 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,140 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened c17d7ff8c5b6ed284ece92b6d32c1363; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60493675, jitterRate=-0.09857399761676788}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:05:56,140 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,140 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] 
regionserver.HRegion(1006): Region open journal for c17d7ff8c5b6ed284ece92b6d32c1363: Running coprocessor pre-open hook at 1733072756109Writing region info on filesystem at 1733072756109Initializing all the Stores at 1733072756110 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072756110Cleaning up temporary data from old regions at 1733072756137 (+27 ms)Running coprocessor post-open hooks at 1733072756140 (+3 ms)Region opened successfully at 1733072756140 2024-12-01T17:05:56,141 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363., pid=27, masterSystemTime=1733072756054 2024-12-01T17:05:56,142 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363.,because compaction is disabled. 2024-12-01T17:05:56,144 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363. 2024-12-01T17:05:56,144 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363. 
2024-12-01T17:05:56,145 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=c17d7ff8c5b6ed284ece92b6d32c1363, regionState=OPEN, openSeqNum=7, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:05:56,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure c17d7ff8c5b6ed284ece92b6d32c1363, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:05:56,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-12-01T17:05:56,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure c17d7ff8c5b6ed284ece92b6d32c1363, server=b87883a7882d,41375,1733072737967 in 248 msec 2024-12-01T17:05:56,157 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-12-01T17:05:56,157 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=c17d7ff8c5b6ed284ece92b6d32c1363, ASSIGN in 411 msec 2024-12-01T17:05:56,161 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=0e879504207e174ec4d3192326092bb1, daughterA=a7e20bcfa36501b077598b1fda66c308, daughterB=c17d7ff8c5b6ed284ece92b6d32c1363 in 898 msec 2024-12-01T17:05:56,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-01T17:05:56,410 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-01T17:05:56,410 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-01T17:05:56,415 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-01T17:05:56,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072756415 (current time:1733072756415). 
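At this point the SplitTableRegionProcedure (pid=21) has finished, the client sees "Operation: SPLIT_REGION ... completed", and it immediately issues the FLUSH snapshot request shown above. A hedged sketch of the client-side calls that would produce that sequence: split at row '5' (the daughter's STARTKEY) and wait for both daughter regions to come online; helper and class names are illustrative, and the snapshot call itself is sketched further below:

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.util.Bytes;

  public class SplitAndWait {
    static void splitAndWait(Admin admin) throws IOException, InterruptedException {
      TableName tn = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
      admin.split(tn, Bytes.toBytes("5"));        // explicit split point, as in this test table
      while (admin.getRegions(tn).size() < 2) {   // wait for both daughters to be assigned
        Thread.sleep(100);
      }
    }
  }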
2024-12-01T17:05:56,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:05:56,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-01T17:05:56,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:05:56,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66d44cad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:56,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:05:56,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:05:56,417 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:05:56,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:05:56,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:05:56,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f179ac0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:56,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:05:56,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:05:56,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:56,419 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54380, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:05:56,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@392e1525, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:56,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:05:56,421 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:05:56,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:56,422 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36780, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:05:56,423 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 2024-12-01T17:05:56,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:05:56,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:56,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:56,423 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:05:56,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19d02103, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:56,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:05:56,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:05:56,425 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:05:56,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:05:56,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:05:56,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2028dd7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:56,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:05:56,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:05:56,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:56,427 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54408, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:05:56,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4472fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:05:56,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:05:56,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:05:56,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:56,431 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36796, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
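Before storing the snapshot procedure, the master briefly opens connections to itself to check that security is available and to copy the table's ACLs into the snapshot description; the "Call stack" DEBUG entries above and below are AsyncConnectionImpl recording where those short-lived connections were closed, not errors. A hedged sketch of reading the same table permissions (the "jenkins: RWXCA" entry read a couple of lines below) through the public AccessControlClient API; the connection setup and class name are illustrative:

  import java.util.List;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.security.access.AccessControlClient;
  import org.apache.hadoop.hbase.security.access.UserPermission;

  public class DumpTableAcls {
    // AccessControlClient.getUserPermissions declares Throwable, so main does too.
    public static void main(String[] args) throws Throwable {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        List<UserPermission> perms = AccessControlClient.getUserPermissions(
            conn, "testExportFileSystemStateWithSplitRegion");
        perms.forEach(System.out::println);   // expect an entry like jenkins: RWXCA
      }
    }
  }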
2024-12-01T17:05:56,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:05:56,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:05:56,436 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45994, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:05:56,437 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 2024-12-01T17:05:56,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:05:56,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:56,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:05:56,438 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:05:56,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-01T17:05:56,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:05:56,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-01T17:05:56,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-01T17:05:56,441 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:05:56,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-01T17:05:56,442 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:05:56,445 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:05:56,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741864_1040 (size=197) 2024-12-01T17:05:56,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741864_1040 (size=197) 2024-12-01T17:05:56,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741864_1040 (size=197) 2024-12-01T17:05:56,456 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:05:56,456 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a7e20bcfa36501b077598b1fda66c308}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c17d7ff8c5b6ed284ece92b6d32c1363}] 2024-12-01T17:05:56,457 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,457 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-01T17:05:56,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41375 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-01T17:05:56,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41375 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-01T17:05:56,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308. 2024-12-01T17:05:56,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363. 2024-12-01T17:05:56,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for a7e20bcfa36501b077598b1fda66c308: 2024-12-01T17:05:56,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for c17d7ff8c5b6ed284ece92b6d32c1363: 2024-12-01T17:05:56,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-01T17:05:56,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363. for snapshot-testExportFileSystemStateWithSplitRegion completed. 
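The master has now fanned the snapshot out as one SnapshotRegionProcedure per daughter region (pids 29 and 30); each SnapshotRegionCallable flushes its region (nothing needed flushing here, hence the empty flush journals) and records its hfile references, while the requesting client keeps polling "Checking to see if procedure is done pid=28". A hedged sketch of the client side of such a FLUSH snapshot, assuming the asynchronous Admin#snapshotAsync variant in recent HBase; the timeout and class name are illustrative:

  import java.util.concurrent.Future;
  import java.util.concurrent.TimeUnit;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.SnapshotDescription;
  import org.apache.hadoop.hbase.client.SnapshotType;

  public class TakeFlushSnapshot {
    static void snapshot(Admin admin) throws Exception {
      Future<Void> done = admin.snapshotAsync(new SnapshotDescription(
          "snapshot-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testExportFileSystemStateWithSplitRegion"),
          SnapshotType.FLUSH));
      done.get(2, TimeUnit.MINUTES);   // returns once the master-side SnapshotProcedure succeeds
    }
  }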
2024-12-01T17:05:56,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:56,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:56,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:05:56,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:05:56,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1->hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_-top] hfiles 2024-12-01T17:05:56,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:56,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1->hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_-bottom] hfiles 2024-12-01T17:05:56,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:56,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741866_1042 
(size=182) 2024-12-01T17:05:56,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741866_1042 (size=182) 2024-12-01T17:05:56,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741866_1042 (size=182) 2024-12-01T17:05:56,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363. 2024-12-01T17:05:56,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-01T17:05:56,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-01T17:05:56,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,641 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:05:56,644 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c17d7ff8c5b6ed284ece92b6d32c1363 in 186 msec 2024-12-01T17:05:56,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741865_1041 (size=182) 2024-12-01T17:05:56,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741865_1041 (size=182) 2024-12-01T17:05:56,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741865_1041 (size=182) 2024-12-01T17:05:56,668 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308. 
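Once every region manifest is written, including the offline split parent 0e879504207e174ec4d3192326092bb1 whose hfile is referenced directly a few lines below, the master consolidates them into a single manifest, verifies it, and moves the snapshot out of the .tmp directory. The test then exports the finished snapshot to a separate HDFS directory, which is what the ExportSnapshot and TableMapReduceUtil lines that follow show. A hedged sketch of that export step, assuming the standard ExportSnapshot tool; the destination URI is illustrative, and from a shell the rough equivalent is: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <hdfs-uri>

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
  import org.apache.hadoop.util.ToolRunner;

  public class ExportTestSnapshot {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
          "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
          "-copy-to", "hdfs://localhost:44451/user/jenkins/export-test"   // illustrative target
      });
      System.exit(rc);
    }
  }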
2024-12-01T17:05:56,668 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-01T17:05:56,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-01T17:05:56,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,669 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:05:56,675 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=28 2024-12-01T17:05:56,675 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a7e20bcfa36501b077598b1fda66c308 in 215 msec 2024-12-01T17:05:56,676 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:05:56,678 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-01T17:05:56,678 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-01T17:05:56,678 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:05:56,682 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_] hfiles 2024-12-01T17:05:56,683 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_ 2024-12-01T17:05:56,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741867_1043 (size=129) 2024-12-01T17:05:56,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741867_1043 (size=129) 2024-12-01T17:05:56,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741867_1043 (size=129) 2024-12-01T17:05:56,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-01T17:05:57,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=28 2024-12-01T17:05:57,112 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => 0e879504207e174ec4d3192326092bb1, NAME => 'testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:57,114 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:05:57,116 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:05:57,116 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:57,117 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:57,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741868_1044 (size=891) 2024-12-01T17:05:57,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741868_1044 (size=891) 2024-12-01T17:05:57,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741868_1044 (size=891) 2024-12-01T17:05:57,139 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:05:57,156 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:05:57,156 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:57,158 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:05:57,159 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-01T17:05:57,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 720 msec 2024-12-01T17:05:57,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:57,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-01T17:05:57,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:57,331 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-01T17:05:57,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-01T17:05:57,581 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-01T17:05:57,581 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072757581 2024-12-01T17:05:57,581 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44451, tgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072757581, rawTgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072757581, srcFsUri=hdfs://localhost:44451, srcDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:05:57,628 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44451, inputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:05:57,628 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072757581, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072757581/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:57,632 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-01T17:05:57,640 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072757581/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:05:57,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741869_1045 (size=891) 2024-12-01T17:05:57,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741869_1045 (size=891) 2024-12-01T17:05:57,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741869_1045 (size=891) 2024-12-01T17:05:57,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741870_1046 (size=197) 2024-12-01T17:05:57,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741870_1046 (size=197) 2024-12-01T17:05:57,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741870_1046 (size=197) 2024-12-01T17:05:57,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:05:57,680 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:05:57,680 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:05:58,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-7406981114184246830.jar 2024-12-01T17:05:58,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:05:58,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:05:58,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-7108816505754219820.jar 2024-12-01T17:05:58,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:05:58,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:05:58,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:05:58,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:05:58,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:05:58,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:05:58,665 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-01T17:05:58,665 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-01T17:05:58,666 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-01T17:05:58,666 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-01T17:05:58,666 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-01T17:05:58,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-01T17:05:58,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-01T17:05:58,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-01T17:05:58,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-01T17:05:58,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-01T17:05:58,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-01T17:05:58,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:05:58,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:05:58,671 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:05:58,671 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:05:58,671 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:05:58,672 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:05:58,672 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:05:58,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741871_1047 (size=24020) 2024-12-01T17:05:58,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741871_1047 (size=24020) 2024-12-01T17:05:58,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741871_1047 (size=24020) 2024-12-01T17:05:59,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741872_1048 (size=77755) 2024-12-01T17:05:59,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741872_1048 (size=77755) 2024-12-01T17:05:59,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741872_1048 (size=77755) 2024-12-01T17:05:59,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741873_1049 (size=131360) 2024-12-01T17:05:59,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741873_1049 (size=131360) 2024-12-01T17:05:59,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741873_1049 (size=131360) 2024-12-01T17:05:59,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741874_1050 (size=111793) 2024-12-01T17:05:59,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741874_1050 (size=111793) 2024-12-01T17:05:59,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741874_1050 (size=111793) 2024-12-01T17:05:59,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741875_1051 (size=1832290) 2024-12-01T17:05:59,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741875_1051 (size=1832290) 2024-12-01T17:05:59,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741875_1051 (size=1832290) 2024-12-01T17:05:59,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741876_1052 (size=8360005) 2024-12-01T17:05:59,291 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741876_1052 (size=8360005) 2024-12-01T17:05:59,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741876_1052 (size=8360005) 2024-12-01T17:05:59,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741877_1053 (size=503880) 2024-12-01T17:05:59,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741877_1053 (size=503880) 2024-12-01T17:05:59,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741877_1053 (size=503880) 2024-12-01T17:05:59,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741878_1054 (size=322274) 2024-12-01T17:05:59,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741878_1054 (size=322274) 2024-12-01T17:05:59,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741878_1054 (size=322274) 2024-12-01T17:05:59,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741879_1055 (size=20406) 2024-12-01T17:05:59,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741879_1055 (size=20406) 2024-12-01T17:05:59,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741879_1055 (size=20406) 2024-12-01T17:05:59,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741880_1056 (size=45609) 2024-12-01T17:05:59,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741880_1056 (size=45609) 2024-12-01T17:05:59,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741880_1056 (size=45609) 2024-12-01T17:05:59,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741881_1057 (size=136454) 2024-12-01T17:05:59,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741881_1057 (size=136454) 2024-12-01T17:05:59,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741881_1057 (size=136454) 2024-12-01T17:05:59,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741882_1058 (size=6424742) 2024-12-01T17:05:59,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741882_1058 (size=6424742) 2024-12-01T17:05:59,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741882_1058 (size=6424742) 2024-12-01T17:05:59,414 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741883_1059 (size=1597136) 2024-12-01T17:05:59,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741883_1059 (size=1597136) 2024-12-01T17:05:59,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741883_1059 (size=1597136) 2024-12-01T17:05:59,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741884_1060 (size=30873) 2024-12-01T17:05:59,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741884_1060 (size=30873) 2024-12-01T17:05:59,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741884_1060 (size=30873) 2024-12-01T17:05:59,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741885_1061 (size=29229) 2024-12-01T17:05:59,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741885_1061 (size=29229) 2024-12-01T17:05:59,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741885_1061 (size=29229) 2024-12-01T17:05:59,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741886_1062 (size=903864) 2024-12-01T17:05:59,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741886_1062 (size=903864) 2024-12-01T17:05:59,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741886_1062 (size=903864) 2024-12-01T17:05:59,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741887_1063 (size=440956) 2024-12-01T17:05:59,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741887_1063 (size=440956) 2024-12-01T17:05:59,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741887_1063 (size=440956) 2024-12-01T17:05:59,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741888_1064 (size=5175431) 2024-12-01T17:05:59,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741888_1064 (size=5175431) 2024-12-01T17:05:59,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741888_1064 (size=5175431) 2024-12-01T17:05:59,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741889_1065 (size=232881) 2024-12-01T17:05:59,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741889_1065 (size=232881) 2024-12-01T17:05:59,572 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741889_1065 (size=232881) 2024-12-01T17:05:59,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741890_1066 (size=1323991) 2024-12-01T17:05:59,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741890_1066 (size=1323991) 2024-12-01T17:05:59,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741890_1066 (size=1323991) 2024-12-01T17:05:59,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741891_1067 (size=4695811) 2024-12-01T17:05:59,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741891_1067 (size=4695811) 2024-12-01T17:05:59,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741891_1067 (size=4695811) 2024-12-01T17:05:59,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741892_1068 (size=1877034) 2024-12-01T17:05:59,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741892_1068 (size=1877034) 2024-12-01T17:05:59,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741892_1068 (size=1877034) 2024-12-01T17:05:59,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741893_1069 (size=217555) 2024-12-01T17:05:59,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741893_1069 (size=217555) 2024-12-01T17:05:59,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741893_1069 (size=217555) 2024-12-01T17:05:59,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741894_1070 (size=4188619) 2024-12-01T17:05:59,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741894_1070 (size=4188619) 2024-12-01T17:05:59,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741894_1070 (size=4188619) 2024-12-01T17:05:59,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741895_1071 (size=127628) 2024-12-01T17:05:59,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741895_1071 (size=127628) 2024-12-01T17:05:59,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741895_1071 (size=127628) 2024-12-01T17:05:59,814 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
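[Note, illustrative and not part of the captured log] The JobResourceUploader warning just above means the MapReduce job was submitted without a job jar, so user classes may not resolve on the cluster; in application code this is normally avoided with Job#setJarByClass (or Job#setJar), as the message itself suggests. A minimal sketch, using a hypothetical driver class not taken from this run:

    // Sketch: point the Job at the jar containing the user classes so
    // JobResourceUploader does not warn "No job jar file set".
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JarAwareDriver {
      public static Job createJob(Configuration conf) throws Exception {
        Job job = Job.getInstance(conf, "example-job");
        job.setJarByClass(JarAwareDriver.class);
        // Alternatively, ship an explicit jar: job.setJar("/path/to/job.jar");
        return job;
      }
    }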
2024-12-01T17:05:59,820 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-01T17:05:59,826 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=0e879504207e174ec4d3192326092bb1-fd41708961b547f08dc46c317e0e27c7_SeqId_4_. 2024-12-01T17:05:59,826 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=0e879504207e174ec4d3192326092bb1-fd41708961b547f08dc46c317e0e27c7_SeqId_4_. 2024-12-01T17:05:59,827 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-01T17:05:59,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741896_1072 (size=244) 2024-12-01T17:05:59,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741896_1072 (size=244) 2024-12-01T17:05:59,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741896_1072 (size=244) 2024-12-01T17:05:59,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741897_1073 (size=17) 2024-12-01T17:05:59,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741897_1073 (size=17) 2024-12-01T17:05:59,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741897_1073 (size=17) 2024-12-01T17:06:00,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741898_1074 (size=304135) 2024-12-01T17:06:00,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741898_1074 (size=304135) 2024-12-01T17:06:00,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741898_1074 (size=304135) 2024-12-01T17:06:00,597 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-01T17:06:00,597 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-01T17:06:00,833 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0001_000001 (auth:SIMPLE) from 127.0.0.1:54174 2024-12-01T17:06:00,969 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:06:05,857 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
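[Note, illustrative and not part of the captured log] The ExportSnapshot lines above show the copy phase being prepared: the snapshot's hfile list is loaded, files already present at the destination are skipped, and a single split of about 305.6 M is handed to the MapReduce job. Outside this test harness, the same export is usually launched through the ExportSnapshot tool; a minimal sketch, where the destination URI and mapper count are placeholders (the equivalent flags are also accepted on the hbase command line):

    // Sketch: launching a snapshot export via ToolRunner
    // (snapshot name is the one from this log; -copy-to and -mappers are placeholders).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
            "-copy-to", "hdfs://target-cluster:8020/hbase",
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }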
2024-12-01T17:06:22,404 INFO [master/b87883a7882d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-01T17:06:22,404 INFO [master/b87883a7882d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-01T17:06:23,004 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0001_000001 (auth:SIMPLE) from 127.0.0.1:47994 2024-12-01T17:06:23,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741899_1075 (size=349833) 2024-12-01T17:06:23,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741899_1075 (size=349833) 2024-12-01T17:06:23,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741899_1075 (size=349833) 2024-12-01T17:06:25,243 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0001_000001 (auth:SIMPLE) from 127.0.0.1:46168 2024-12-01T17:06:32,648 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1cb3ab47122820b9c6978423317f0fa3, had cached 0 bytes from a total of 8256 2024-12-01T17:06:32,651 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 12611dd65a0f3153fc731cd592206f66, had cached 0 bytes from a total of 5356 2024-12-01T17:06:33,245 WARN [DataXceiver for client DFSClient_attempt_1733072744857_0001_m_000000_0_1015123321_1 at /127.0.0.1:53688 [Receiving block BP-371634539-172.17.0.3-1733072730982:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1836ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2/, blockId=1073741900, seqno=130 2024-12-01T17:06:33,245 WARN [DataXceiver for client DFSClient_attempt_1733072744857_0001_m_000000_0_1015123321_1 at /127.0.0.1:44792 [Receiving block BP-371634539-172.17.0.3-1733072730982:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1836ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4/, blockId=1073741900, seqno=130 2024-12-01T17:06:33,245 WARN [DataXceiver for client DFSClient_attempt_1733072744857_0001_m_000000_0_1015123321_1 at /127.0.0.1:35882 [Receiving block BP-371634539-172.17.0.3-1733072730982:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1836ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6/, blockId=1073741900, seqno=130 2024-12-01T17:06:35,857 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
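[Note, illustrative and not part of the captured log] The Slow BlockReceiver warnings above and below are the DataNode flagging disk writes that exceeded its slow-IO threshold (300 ms by default), which is common on a shared CI host and harmless here. If a test needed to quiet them, the threshold is a standard DataNode setting; a minimal sketch, where the 5-second value is arbitrary:

    // Sketch: relax the DataNode slow-write warning threshold in a test config
    // (key defaults to 300 ms; the 5000 ms value here is arbitrary).
    import org.apache.hadoop.conf.Configuration;

    public class SlowIoThreshold {
      public static Configuration withRelaxedThreshold() {
        Configuration conf = new Configuration();
        conf.setLong("dfs.datanode.slow.io.warning.threshold.ms", 5000L);
        return conf;
      }
    }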
2024-12-01T17:06:41,060 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a7e20bcfa36501b077598b1fda66c308, had cached 0 bytes from a total of 320414712 2024-12-01T17:06:41,109 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c17d7ff8c5b6ed284ece92b6d32c1363, had cached 0 bytes from a total of 320414712 2024-12-01T17:06:41,558 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5b10cafeb6a61c25ce1e97f8b1c04b89 changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:06:41,558 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 12611dd65a0f3153fc731cd592206f66 changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:06:41,558 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1cb3ab47122820b9c6978423317f0fa3 changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:07:01,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741900_1076 (size=134217728) 2024-12-01T17:07:01,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741900_1076 (size=134217728) 2024-12-01T17:07:01,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741900_1076 (size=134217728) 2024-12-01T17:07:05,857 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-01T17:07:08,301 WARN [DataXceiver for client DFSClient_attempt_1733072744857_0001_m_000000_0_1015123321_1 at /127.0.0.1:43078 [Receiving block BP-371634539-172.17.0.3-1733072730982:blk_1073741901_1077] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 5059ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3/, blockId=1073741901, seqno=2240 2024-12-01T17:07:17,649 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1cb3ab47122820b9c6978423317f0fa3, had cached 0 bytes from a total of 8256 2024-12-01T17:07:17,651 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 12611dd65a0f3153fc731cd592206f66, had cached 0 bytes from a total of 5356 2024-12-01T17:07:26,061 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a7e20bcfa36501b077598b1fda66c308, had cached 0 bytes from a total of 320414712 2024-12-01T17:07:26,109 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c17d7ff8c5b6ed284ece92b6d32c1363, had cached 0 bytes from a total of 320414712 2024-12-01T17:07:34,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741901_1077 (size=134217728) 2024-12-01T17:07:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741901_1077 (size=134217728) 2024-12-01T17:07:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741901_1077 (size=134217728) 2024-12-01T17:07:35,858 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
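[Note, illustrative and not part of the captured log] The block sizes reported for the copied hfile add up to the split size noted earlier: 134,217,728 bytes is the default 128 MiB HDFS block size, and 2 × 134,217,728 + 51,979,256 = 320,414,712 bytes ≈ 305.6 MiB, matching both the "export split=0 size=305.6 M" line and the "had cached 0 bytes from a total of 320414712" region metrics (the 51,979,256-byte tail block is reported just below).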
2024-12-01T17:07:45,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741902_1078 (size=51979256) 2024-12-01T17:07:45,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741902_1078 (size=51979256) 2024-12-01T17:07:45,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741902_1078 (size=51979256) 2024-12-01T17:07:45,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741903_1079 (size=17520) 2024-12-01T17:07:45,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741903_1079 (size=17520) 2024-12-01T17:07:45,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741903_1079 (size=17520) 2024-12-01T17:07:45,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741904_1080 (size=482) 2024-12-01T17:07:45,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741904_1080 (size=482) 2024-12-01T17:07:45,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741904_1080 (size=482) 2024-12-01T17:07:45,491 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_2/usercache/jenkins/appcache/application_1733072744857_0001/container_1733072744857_0001_01_000002/launch_container.sh] 2024-12-01T17:07:45,491 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_2/usercache/jenkins/appcache/application_1733072744857_0001/container_1733072744857_0001_01_000002/container_tokens] 2024-12-01T17:07:45,491 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_2/usercache/jenkins/appcache/application_1733072744857_0001/container_1733072744857_0001_01_000002/sysfs] 2024-12-01T17:07:45,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741905_1081 (size=17520) 2024-12-01T17:07:45,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741905_1081 (size=17520) 2024-12-01T17:07:45,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741905_1081 (size=17520) 2024-12-01T17:07:45,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to 
blk_1073741906_1082 (size=349833) 2024-12-01T17:07:45,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741906_1082 (size=349833) 2024-12-01T17:07:45,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741906_1082 (size=349833) 2024-12-01T17:07:45,530 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0001_000001 (auth:SIMPLE) from 127.0.0.1:49550 2024-12-01T17:07:47,238 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-01T17:07:47,239 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-01T17:07:47,246 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,246 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-01T17:07:47,247 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-01T17:07:47,247 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,247 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-01T17:07:47,248 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-01T17:07:47,248 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072757581/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072757581/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,248 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072757581/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-01T17:07:47,248 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072757581/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-01T17:07:47,262 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,268 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-01T17:07:47,272 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072867271"}]},"ts":"1733072867271"} 2024-12-01T17:07:47,274 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-01T17:07:47,274 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-01T17:07:47,276 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-01T17:07:47,283 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a7e20bcfa36501b077598b1fda66c308, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=c17d7ff8c5b6ed284ece92b6d32c1363, UNASSIGN}] 2024-12-01T17:07:47,284 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a7e20bcfa36501b077598b1fda66c308, UNASSIGN 2024-12-01T17:07:47,284 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=c17d7ff8c5b6ed284ece92b6d32c1363, UNASSIGN 2024-12-01T17:07:47,285 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=c17d7ff8c5b6ed284ece92b6d32c1363, regionState=CLOSING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:07:47,285 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=a7e20bcfa36501b077598b1fda66c308, regionState=CLOSING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:07:47,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a7e20bcfa36501b077598b1fda66c308, UNASSIGN because future has completed 2024-12-01T17:07:47,288 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:07:47,288 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure a7e20bcfa36501b077598b1fda66c308, 
server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:07:47,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=c17d7ff8c5b6ed284ece92b6d32c1363, UNASSIGN because future has completed 2024-12-01T17:07:47,289 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:07:47,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure c17d7ff8c5b6ed284ece92b6d32c1363, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:07:47,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-01T17:07:47,441 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:07:47,441 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:07:47,442 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing a7e20bcfa36501b077598b1fda66c308, disabling compactions & flushes 2024-12-01T17:07:47,442 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308. 2024-12-01T17:07:47,442 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308. 2024-12-01T17:07:47,442 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308. after waiting 0 ms 2024-12-01T17:07:47,442 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308. 2024-12-01T17:07:47,446 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-01T17:07:47,447 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:07:47,447 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308. 
2024-12-01T17:07:47,447 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for a7e20bcfa36501b077598b1fda66c308: Waiting for close lock at 1733072867442Running coprocessor pre-close hooks at 1733072867442Disabling compacts and flushes for region at 1733072867442Disabling writes for close at 1733072867442Writing region close event to WAL at 1733072867443 (+1 ms)Running coprocessor post-close hooks at 1733072867447 (+4 ms)Closed at 1733072867447 2024-12-01T17:07:47,450 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:07:47,450 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:07:47,450 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:07:47,450 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing c17d7ff8c5b6ed284ece92b6d32c1363, disabling compactions & flushes 2024-12-01T17:07:47,450 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363. 2024-12-01T17:07:47,451 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363. 2024-12-01T17:07:47,451 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363. after waiting 0 ms 2024-12-01T17:07:47,451 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363. 
2024-12-01T17:07:47,451 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=a7e20bcfa36501b077598b1fda66c308, regionState=CLOSED 2024-12-01T17:07:47,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure a7e20bcfa36501b077598b1fda66c308, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:07:47,456 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-01T17:07:47,456 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:07:47,457 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363. 2024-12-01T17:07:47,457 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for c17d7ff8c5b6ed284ece92b6d32c1363: Waiting for close lock at 1733072867450Running coprocessor pre-close hooks at 1733072867450Disabling compacts and flushes for region at 1733072867450Disabling writes for close at 1733072867451 (+1 ms)Writing region close event to WAL at 1733072867451Running coprocessor post-close hooks at 1733072867456 (+5 ms)Closed at 1733072867457 (+1 ms) 2024-12-01T17:07:47,457 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-12-01T17:07:47,457 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure a7e20bcfa36501b077598b1fda66c308, server=b87883a7882d,41375,1733072737967 in 166 msec 2024-12-01T17:07:47,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=a7e20bcfa36501b077598b1fda66c308, UNASSIGN in 174 msec 2024-12-01T17:07:47,459 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:07:47,459 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=c17d7ff8c5b6ed284ece92b6d32c1363, regionState=CLOSED 2024-12-01T17:07:47,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure c17d7ff8c5b6ed284ece92b6d32c1363, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:07:47,464 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=34 2024-12-01T17:07:47,464 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure c17d7ff8c5b6ed284ece92b6d32c1363, 
server=b87883a7882d,41375,1733072737967 in 173 msec 2024-12-01T17:07:47,467 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-12-01T17:07:47,467 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=c17d7ff8c5b6ed284ece92b6d32c1363, UNASSIGN in 181 msec 2024-12-01T17:07:47,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-01T17:07:47,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 192 msec 2024-12-01T17:07:47,471 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072867471"}]},"ts":"1733072867471"} 2024-12-01T17:07:47,473 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-01T17:07:47,473 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-01T17:07:47,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 210 msec 2024-12-01T17:07:47,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-01T17:07:47,591 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-01T17:07:47,594 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,599 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,601 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,604 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,611 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1 2024-12-01T17:07:47,611 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:07:47,611 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:07:47,614 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363/recovered.edits] 2024-12-01T17:07:47,614 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308/recovered.edits] 2024-12-01T17:07:47,614 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/recovered.edits] 2024-12-01T17:07:47,620 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1 2024-12-01T17:07:47,621 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_ to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_ 2024-12-01T17:07:47,621 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308/cf/fd41708961b547f08dc46c317e0e27c7_SeqId_4_.0e879504207e174ec4d3192326092bb1 2024-12-01T17:07:47,624 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363/recovered.edits/10.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363/recovered.edits/10.seqid 2024-12-01T17:07:47,624 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/recovered.edits/6.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1/recovered.edits/6.seqid 2024-12-01T17:07:47,624 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308/recovered.edits/10.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308/recovered.edits/10.seqid 2024-12-01T17:07:47,624 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/c17d7ff8c5b6ed284ece92b6d32c1363 2024-12-01T17:07:47,624 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/0e879504207e174ec4d3192326092bb1 2024-12-01T17:07:47,625 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportFileSystemStateWithSplitRegion/a7e20bcfa36501b077598b1fda66c308 2024-12-01T17:07:47,625 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-01T17:07:47,627 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-01T17:07:47,636 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from 
hbase:meta 2024-12-01T17:07:47,640 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-01T17:07:47,642 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,643 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 2024-12-01T17:07:47,643 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072867643"}]},"ts":"9223372036854775807"} 2024-12-01T17:07:47,643 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072867643"}]},"ts":"9223372036854775807"} 2024-12-01T17:07:47,643 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072867643"}]},"ts":"9223372036854775807"} 2024-12-01T17:07:47,647 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-12-01T17:07:47,647 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0e879504207e174ec4d3192326092bb1, NAME => 'testExportFileSystemStateWithSplitRegion,,1733072749442.0e879504207e174ec4d3192326092bb1.', STARTKEY => '', ENDKEY => ''}, {ENCODED => a7e20bcfa36501b077598b1fda66c308, NAME => 'testExportFileSystemStateWithSplitRegion,,1733072755258.a7e20bcfa36501b077598b1fda66c308.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => c17d7ff8c5b6ed284ece92b6d32c1363, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733072755258.c17d7ff8c5b6ed284ece92b6d32c1363.', STARTKEY => '5', ENDKEY => ''}] 2024-12-01T17:07:47,648 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 
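[editor's note, not part of the log] The entries above show the master finishing DeleteTableProcedure for testExportFileSystemStateWithSplitRegion (regions archived, rows removed from hbase:meta, table marked deleted), and just below the client asks to disable the second table. Purely for orientation, here is a minimal, hypothetical client-side sketch of the Admin calls that trigger these procedures; the quorum address and table name are placeholders and are not taken from this run.

// Illustrative sketch only: drives DisableTableProcedure / DeleteTableProcedure from a client.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");            // placeholder quorum, not from this log
    TableName table = TableName.valueOf("default", "someTable"); // placeholder table name
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table); // master runs DisableTableProcedure (close/unassign regions)
        }
        admin.deleteTable(table);    // master runs DeleteTableProcedure (archive HFiles, clean hbase:meta)
      }
    }
  }
}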
2024-12-01T17:07:47,648 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733072867648"}]},"ts":"9223372036854775807"} 2024-12-01T17:07:47,651 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-01T17:07:47,652 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:47,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 58 msec 2024-12-01T17:07:48,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:48,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:48,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:48,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:48,361 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-01T17:07:48,361 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-01T17:07:48,361 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-01T17:07:48,361 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-01T17:07:48,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:48,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:48,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:48,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:07:48,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:48,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:07:48,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:07:48,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:07:48,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:07:48,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:07:48,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-01T17:07:48,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:07:48,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:07:48,459 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:48,460 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-01T17:07:48,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:50,790 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at 
/127.0.0.1:36134 [Receiving block BP-371634539-172.17.0.3-1733072730982:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2328ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2/, blockId=1073741830, seqno=321 2024-12-01T17:07:50,790 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:45884 [Receiving block BP-371634539-172.17.0.3-1733072730982:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2328ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4/, blockId=1073741830, seqno=321 2024-12-01T17:07:50,790 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:48312 [Receiving block BP-371634539-172.17.0.3-1733072730982:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2328ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6/, blockId=1073741830, seqno=321 2024-12-01T17:07:50,791 INFO [AsyncFSWAL-0-hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData-prefix:b87883a7882d,38737,1733072736902 {}] wal.AbstractFSWAL(1368): Slow sync cost: 2330 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36379,DS-66fd9c32-3ad4-437a-8b5c-d300efc8d3c7,DISK], DatanodeInfoWithStorage[127.0.0.1:44603,DS-b6e128c0-a4bb-411c-90ad-04cc93436444,DISK], DatanodeInfoWithStorage[127.0.0.1:36291,DS-77e20430-c1a6-42bb-bd62-c245d594c058,DISK]] 2024-12-01T17:07:50,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:50,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-01T17:07:50,797 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072870797"}]},"ts":"1733072870797"} 2024-12-01T17:07:50,800 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-01T17:07:50,800 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-01T17:07:50,801 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-01T17:07:50,803 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithSplitRegion, region=12611dd65a0f3153fc731cd592206f66, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1cb3ab47122820b9c6978423317f0fa3, UNASSIGN}] 2024-12-01T17:07:50,804 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1cb3ab47122820b9c6978423317f0fa3, UNASSIGN 2024-12-01T17:07:50,805 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=12611dd65a0f3153fc731cd592206f66, UNASSIGN 2024-12-01T17:07:50,806 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=1cb3ab47122820b9c6978423317f0fa3, regionState=CLOSING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:07:50,806 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=12611dd65a0f3153fc731cd592206f66, regionState=CLOSING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:07:50,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1cb3ab47122820b9c6978423317f0fa3, UNASSIGN because future has completed 2024-12-01T17:07:50,808 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:07:50,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1cb3ab47122820b9c6978423317f0fa3, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:07:50,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=12611dd65a0f3153fc731cd592206f66, UNASSIGN because future has completed 2024-12-01T17:07:50,810 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:07:50,810 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 12611dd65a0f3153fc731cd592206f66, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:07:50,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-01T17:07:50,964 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34417, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T17:07:50,965 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close 
12611dd65a0f3153fc731cd592206f66 2024-12-01T17:07:50,965 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:07:50,965 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:07:50,965 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:07:50,965 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing 12611dd65a0f3153fc731cd592206f66, disabling compactions & flushes 2024-12-01T17:07:50,965 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:07:50,965 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 1cb3ab47122820b9c6978423317f0fa3, disabling compactions & flushes 2024-12-01T17:07:50,965 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:07:50,965 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 2024-12-01T17:07:50,965 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. after waiting 0 ms 2024-12-01T17:07:50,965 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 2024-12-01T17:07:50,965 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:07:50,965 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. after waiting 0 ms 2024-12-01T17:07:50,965 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 
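[editor's note, not part of the log] The block above is the region-server side of the disable: UnassignRegionHandler closes both regions, compactions and flushes are disabled, and the close is journaled. As a hypothetical illustration only (not the test's own code), a client could drive and observe this close/unassign sequence like so, with the table name again a placeholder.

// Illustrative sketch only: wait for the async disable and read back the (now closed) regions.
import java.util.List;
import java.util.concurrent.Future;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;

public final class DisableWatchSketch {
  public static void waitForDisable(Admin admin, TableName table) throws Exception {
    Future<Void> f = admin.disableTableAsync(table); // master schedules DisableTableProcedure
    f.get();                                         // returns once regions are closed/unassigned
    List<RegionInfo> regions = admin.getRegions(table);
    System.out.println("disabled=" + admin.isTableDisabled(table)
        + ", regions still in meta=" + regions.size());
  }
}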
2024-12-01T17:07:50,970 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:07:50,970 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:07:50,971 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:07:50,971 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:07:50,971 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66. 2024-12-01T17:07:50,971 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3. 2024-12-01T17:07:50,971 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for 12611dd65a0f3153fc731cd592206f66: Waiting for close lock at 1733072870965Running coprocessor pre-close hooks at 1733072870965Disabling compacts and flushes for region at 1733072870965Disabling writes for close at 1733072870965Writing region close event to WAL at 1733072870966 (+1 ms)Running coprocessor post-close hooks at 1733072870971 (+5 ms)Closed at 1733072870971 2024-12-01T17:07:50,971 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 1cb3ab47122820b9c6978423317f0fa3: Waiting for close lock at 1733072870965Running coprocessor pre-close hooks at 1733072870965Disabling compacts and flushes for region at 1733072870965Disabling writes for close at 1733072870965Writing region close event to WAL at 1733072870966 (+1 ms)Running coprocessor post-close hooks at 1733072870971 (+5 ms)Closed at 1733072870971 2024-12-01T17:07:50,973 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed 12611dd65a0f3153fc731cd592206f66 2024-12-01T17:07:50,974 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=12611dd65a0f3153fc731cd592206f66, regionState=CLOSED 2024-12-01T17:07:50,974 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:07:50,974 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=1cb3ab47122820b9c6978423317f0fa3, regionState=CLOSED 2024-12-01T17:07:50,975 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 12611dd65a0f3153fc731cd592206f66, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:07:50,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1cb3ab47122820b9c6978423317f0fa3, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:07:50,979 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=40 2024-12-01T17:07:50,979 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 12611dd65a0f3153fc731cd592206f66, server=b87883a7882d,41375,1733072737967 in 167 msec 2024-12-01T17:07:50,980 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-12-01T17:07:50,980 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure 1cb3ab47122820b9c6978423317f0fa3, server=b87883a7882d,44753,1733072737741 in 169 msec 2024-12-01T17:07:50,981 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=12611dd65a0f3153fc731cd592206f66, UNASSIGN in 176 msec 2024-12-01T17:07:50,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=41, resume processing ppid=39 2024-12-01T17:07:50,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1cb3ab47122820b9c6978423317f0fa3, UNASSIGN in 177 msec 2024-12-01T17:07:50,984 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-01T17:07:50,984 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 182 msec 2024-12-01T17:07:50,985 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072870985"}]},"ts":"1733072870985"} 2024-12-01T17:07:50,987 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-01T17:07:50,987 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-01T17:07:50,993 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 2.5280 sec 2024-12-01T17:07:51,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-01T17:07:51,111 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: 
DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-01T17:07:51,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,118 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,121 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,123 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,125 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66 2024-12-01T17:07:51,125 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:07:51,128 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/recovered.edits] 2024-12-01T17:07:51,128 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/recovered.edits] 2024-12-01T17:07:51,132 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/cf/ae21be71d7b540b2a104ed497784474b to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/cf/ae21be71d7b540b2a104ed497784474b 
2024-12-01T17:07:51,132 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/cf/13af3a19c8aa42c0aca603c48f410e83 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/cf/13af3a19c8aa42c0aca603c48f410e83 2024-12-01T17:07:51,134 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3/recovered.edits/9.seqid 2024-12-01T17:07:51,134 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66/recovered.edits/9.seqid 2024-12-01T17:07:51,135 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/12611dd65a0f3153fc731cd592206f66 2024-12-01T17:07:51,135 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSplitRegion/1cb3ab47122820b9c6978423317f0fa3 2024-12-01T17:07:51,135 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-12-01T17:07:51,137 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,141 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-01T17:07:51,144 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-01T17:07:51,145 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,145 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 
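[editor's note, not part of the log] The HFileArchiver entries above move each region's cf store files and recovered.edits under the archive tree before the region directory itself is deleted. As an illustration only, one could list what ends up under that archive location with the Hadoop FileSystem API; the namenode URI and table path below are assumptions, not values from this run.

// Illustrative sketch only: recursively list archived files for a deleted table.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public final class ArchiveListingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf); // placeholder namenode
    Path archived = new Path("/hbase/archive/data/default/someTable");          // placeholder archive path
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(archived, true);        // recurse into region/cf dirs
    while (it.hasNext()) {
      LocatedFileStatus f = it.next();
      System.out.println(f.getPath() + " " + f.getLen() + " bytes");
    }
  }
}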
2024-12-01T17:07:51,145 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072871145"}]},"ts":"9223372036854775807"} 2024-12-01T17:07:51,145 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072871145"}]},"ts":"9223372036854775807"} 2024-12-01T17:07:51,147 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-01T17:07:51,147 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 12611dd65a0f3153fc731cd592206f66, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733072746798.12611dd65a0f3153fc731cd592206f66.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 1cb3ab47122820b9c6978423317f0fa3, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733072746798.1cb3ab47122820b9c6978423317f0fa3.', STARTKEY => '1', ENDKEY => ''}] 2024-12-01T17:07:51,148 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 2024-12-01T17:07:51,148 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733072871148"}]},"ts":"9223372036854775807"} 2024-12-01T17:07:51,150 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-12-01T17:07:51,151 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,152 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 38 msec 2024-12-01T17:07:51,153 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-01T17:07:51,153 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-01T17:07:51,153 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-01T17:07:51,153 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-01T17:07:51,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:07:51,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:07:51,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:07:51,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:07:51,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-01T17:07:51,164 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,164 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-01T17:07:51,183 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-01T17:07:51,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,188 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-01T17:07:51,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,192 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-01T17:07:51,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:51,220 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=765 (was 723) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/b87883a7882d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/b87883a7882d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:36140 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45395 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396781562) connection to localhost/127.0.0.1:45395 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/b87883a7882d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 120643) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:53004 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1377 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) 
java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:38072 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/b87883a7882d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/b87883a7882d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=791 (was 782) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=450 (was 286) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=2362 (was 8076) 2024-12-01T17:07:51,221 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=765 is superior to 500 2024-12-01T17:07:51,237 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=765, OpenFileDescriptor=791, MaxFileDescriptor=1048576, SystemLoadAverage=450, ProcessCount=17, AvailableMemoryMB=2361 2024-12-01T17:07:51,237 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=765 is superior to 500 2024-12-01T17:07:51,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:07:51,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-01T17:07:51,242 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:07:51,242 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:07:51,242 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-01T17:07:51,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-01T17:07:51,243 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:07:51,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741907_1083 (size=406) 2024-12-01T17:07:51,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741907_1083 (size=406) 2024-12-01T17:07:51,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741907_1083 (size=406) 2024-12-01T17:07:51,254 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cb2907d900a67f22cb4f023c397230bf, NAME => 'testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:07:51,254 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => b369e749f12bbc3534a96e5f94df1c66, NAME => 'testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:07:51,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741908_1084 (size=67) 2024-12-01T17:07:51,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741908_1084 (size=67) 2024-12-01T17:07:51,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741909_1085 (size=67) 2024-12-01T17:07:51,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741909_1085 (size=67) 2024-12-01T17:07:51,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741908_1084 (size=67) 2024-12-01T17:07:51,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36291 is added to blk_1073741909_1085 (size=67) 2024-12-01T17:07:51,264 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:07:51,264 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing b369e749f12bbc3534a96e5f94df1c66, disabling compactions & flushes 2024-12-01T17:07:51,264 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 2024-12-01T17:07:51,264 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 2024-12-01T17:07:51,264 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. after waiting 0 ms 2024-12-01T17:07:51,264 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 2024-12-01T17:07:51,264 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 2024-12-01T17:07:51,264 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for b369e749f12bbc3534a96e5f94df1c66: Waiting for close lock at 1733072871264Disabling compacts and flushes for region at 1733072871264Disabling writes for close at 1733072871264Writing region close event to WAL at 1733072871264Closed at 1733072871264 2024-12-01T17:07:51,265 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:07:51,265 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing cb2907d900a67f22cb4f023c397230bf, disabling compactions & flushes 2024-12-01T17:07:51,265 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 2024-12-01T17:07:51,265 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 2024-12-01T17:07:51,265 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 
after waiting 0 ms 2024-12-01T17:07:51,265 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 2024-12-01T17:07:51,265 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 2024-12-01T17:07:51,265 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for cb2907d900a67f22cb4f023c397230bf: Waiting for close lock at 1733072871265Disabling compacts and flushes for region at 1733072871265Disabling writes for close at 1733072871265Writing region close event to WAL at 1733072871265Closed at 1733072871265 2024-12-01T17:07:51,266 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:07:51,267 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733072871266"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072871266"}]},"ts":"1733072871266"} 2024-12-01T17:07:51,267 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733072871266"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072871266"}]},"ts":"1733072871266"} 2024-12-01T17:07:51,269 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-01T17:07:51,270 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:07:51,270 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072871270"}]},"ts":"1733072871270"} 2024-12-01T17:07:51,271 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-01T17:07:51,272 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:07:51,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:07:51,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:07:51,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:07:51,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:07:51,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:07:51,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:07:51,273 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:07:51,273 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:07:51,273 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:07:51,273 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:07:51,273 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cb2907d900a67f22cb4f023c397230bf, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b369e749f12bbc3534a96e5f94df1c66, ASSIGN}] 2024-12-01T17:07:51,275 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b369e749f12bbc3534a96e5f94df1c66, ASSIGN 2024-12-01T17:07:51,275 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cb2907d900a67f22cb4f023c397230bf, ASSIGN 2024-12-01T17:07:51,276 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cb2907d900a67f22cb4f023c397230bf, ASSIGN; state=OFFLINE, location=b87883a7882d,44753,1733072737741; forceNewPlan=false, retain=false 2024-12-01T17:07:51,276 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b369e749f12bbc3534a96e5f94df1c66, ASSIGN; state=OFFLINE, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:07:51,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-01T17:07:51,426 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-01T17:07:51,427 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=cb2907d900a67f22cb4f023c397230bf, regionState=OPENING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:07:51,427 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=b369e749f12bbc3534a96e5f94df1c66, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:07:51,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cb2907d900a67f22cb4f023c397230bf, ASSIGN because future has completed 2024-12-01T17:07:51,431 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure cb2907d900a67f22cb4f023c397230bf, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:07:51,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b369e749f12bbc3534a96e5f94df1c66, ASSIGN because future has completed 2024-12-01T17:07:51,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure b369e749f12bbc3534a96e5f94df1c66, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:07:51,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-01T17:07:51,587 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36079, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T17:07:51,588 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 2024-12-01T17:07:51,588 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => cb2907d900a67f22cb4f023c397230bf, NAME => 'testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:07:51,589 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 
service=AccessControlService 2024-12-01T17:07:51,589 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:07:51,589 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:51,589 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:07:51,590 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:51,590 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:51,592 INFO [StoreOpener-cb2907d900a67f22cb4f023c397230bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:51,594 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 2024-12-01T17:07:51,594 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => b369e749f12bbc3534a96e5f94df1c66, NAME => 'testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T17:07:51,595 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. service=AccessControlService 2024-12-01T17:07:51,595 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:07:51,595 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:51,595 INFO [StoreOpener-cb2907d900a67f22cb4f023c397230bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cb2907d900a67f22cb4f023c397230bf columnFamilyName cf 2024-12-01T17:07:51,595 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:07:51,595 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:51,595 DEBUG [StoreOpener-cb2907d900a67f22cb4f023c397230bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:07:51,595 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:51,596 INFO [StoreOpener-cb2907d900a67f22cb4f023c397230bf-1 {}] regionserver.HStore(327): Store=cb2907d900a67f22cb4f023c397230bf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:07:51,596 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:51,597 INFO [StoreOpener-b369e749f12bbc3534a96e5f94df1c66-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:51,597 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:51,597 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:51,598 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:51,598 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:51,598 INFO [StoreOpener-b369e749f12bbc3534a96e5f94df1c66-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b369e749f12bbc3534a96e5f94df1c66 columnFamilyName cf 2024-12-01T17:07:51,598 DEBUG [StoreOpener-b369e749f12bbc3534a96e5f94df1c66-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:07:51,599 INFO [StoreOpener-b369e749f12bbc3534a96e5f94df1c66-1 {}] regionserver.HStore(327): Store=b369e749f12bbc3534a96e5f94df1c66/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:07:51,599 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:51,599 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:51,599 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:51,600 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:51,600 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:51,600 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:51,601 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:07:51,601 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:51,602 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened cb2907d900a67f22cb4f023c397230bf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64425557, jitterRate=-0.03998439013957977}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:07:51,602 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:51,602 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for cb2907d900a67f22cb4f023c397230bf: Running coprocessor pre-open hook at 1733072871590Writing region info on filesystem at 1733072871590Initializing all the Stores at 1733072871592 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072871592Cleaning up temporary data from old regions at 1733072871598 (+6 ms)Running coprocessor post-open hooks at 1733072871602 (+4 ms)Region opened successfully at 1733072871602 2024-12-01T17:07:51,603 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf., pid=48, masterSystemTime=1733072871584 2024-12-01T17:07:51,604 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:07:51,604 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened b369e749f12bbc3534a96e5f94df1c66; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59650552, jitterRate=-0.1111375093460083}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:07:51,605 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:51,605 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for b369e749f12bbc3534a96e5f94df1c66: Running coprocessor pre-open hook at 1733072871595Writing region info on filesystem at 
1733072871595Initializing all the Stores at 1733072871596 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072871596Cleaning up temporary data from old regions at 1733072871600 (+4 ms)Running coprocessor post-open hooks at 1733072871605 (+5 ms)Region opened successfully at 1733072871605 2024-12-01T17:07:51,606 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 2024-12-01T17:07:51,606 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 2024-12-01T17:07:51,606 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66., pid=49, masterSystemTime=1733072871586 2024-12-01T17:07:51,607 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=cb2907d900a67f22cb4f023c397230bf, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:07:51,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure cb2907d900a67f22cb4f023c397230bf, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:07:51,609 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 2024-12-01T17:07:51,609 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 
2024-12-01T17:07:51,610 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=b369e749f12bbc3534a96e5f94df1c66, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:07:51,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure b369e749f12bbc3534a96e5f94df1c66, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:07:51,614 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=46 2024-12-01T17:07:51,615 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure cb2907d900a67f22cb4f023c397230bf, server=b87883a7882d,44753,1733072737741 in 180 msec 2024-12-01T17:07:51,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=47 2024-12-01T17:07:51,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cb2907d900a67f22cb4f023c397230bf, ASSIGN in 342 msec 2024-12-01T17:07:51,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure b369e749f12bbc3534a96e5f94df1c66, server=b87883a7882d,36521,1733072737896 in 181 msec 2024-12-01T17:07:51,619 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0001_000001 (auth:SIMPLE) from 127.0.0.1:55266 2024-12-01T17:07:51,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=47, resume processing ppid=45 2024-12-01T17:07:51,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b369e749f12bbc3534a96e5f94df1c66, ASSIGN in 344 msec 2024-12-01T17:07:51,621 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:07:51,621 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072871621"}]},"ts":"1733072871621"} 2024-12-01T17:07:51,623 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-01T17:07:51,624 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:07:51,625 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-01T17:07:51,629 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-01T17:07:51,630 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0001/container_1733072744857_0001_01_000001/launch_container.sh] 2024-12-01T17:07:51,630 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0001/container_1733072744857_0001_01_000001/container_tokens] 2024-12-01T17:07:51,630 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0001/container_1733072744857_0001_01_000001/sysfs] 2024-12-01T17:07:51,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:07:51,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:07:51,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:07:51,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:07:51,689 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-01T17:07:51,689 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-01T17:07:51,689 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-01T17:07:51,689 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-01T17:07:51,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 450 msec 
2024-12-01T17:07:51,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-01T17:07:51,871 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-01T17:07:51,871 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-01T17:07:51,871 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:07:51,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-12-01T17:07:51,879 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:07:51,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 2024-12-01T17:07:51,880 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-01T17:07:51,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-01T17:07:51,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072871884 (current time:1733072871884). 
2024-12-01T17:07:51,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:07:51,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-01T17:07:51,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:07:51,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34478d52, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:07:51,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:07:51,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:07:51,886 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:07:51,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:07:51,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:07:51,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@558744df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:07:51,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:07:51,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:07:51,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:07:51,888 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56518, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:07:51,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17612aa2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:07:51,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:07:51,890 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:07:51,890 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:07:51,891 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48758, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:07:51,892 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 2024-12-01T17:07:51,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:07:51,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:07:51,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:07:51,892 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:07:51,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2960631f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:07:51,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:07:51,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:07:51,894 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:07:51,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:07:51,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:07:51,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7faa0a08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:07:51,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:07:51,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:07:51,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:07:51,895 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56536, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:07:51,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@467aa5fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:07:51,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:07:51,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:07:51,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:07:51,899 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48770, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-01T17:07:51,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:07:51,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:07:51,902 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57538, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:07:51,903 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 2024-12-01T17:07:51,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:07:51,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:07:51,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:07:51,904 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:07:51,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-01T17:07:51,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:07:51,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-01T17:07:51,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-01T17:07:51,907 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:07:51,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-01T17:07:51,908 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:07:51,910 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:07:51,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741910_1086 (size=167) 2024-12-01T17:07:51,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741910_1086 (size=167) 2024-12-01T17:07:51,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741910_1086 (size=167) 2024-12-01T17:07:51,918 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:07:51,919 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb2907d900a67f22cb4f023c397230bf}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b369e749f12bbc3534a96e5f94df1c66}] 2024-12-01T17:07:51,920 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:51,920 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:52,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-01T17:07:52,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-01T17:07:52,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-01T17:07:52,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 2024-12-01T17:07:52,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 2024-12-01T17:07:52,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for cb2907d900a67f22cb4f023c397230bf: 2024-12-01T17:07:52,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. for emptySnaptb0-testExportWithTargetName completed. 2024-12-01T17:07:52,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for b369e749f12bbc3534a96e5f94df1c66: 2024-12-01T17:07:52,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. for emptySnaptb0-testExportWithTargetName completed. 2024-12-01T17:07:52,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-01T17:07:52,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:07:52,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:07:52,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-01T17:07:52,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:07:52,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:07:52,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741912_1088 (size=70) 2024-12-01T17:07:52,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741911_1087 (size=70) 2024-12-01T17:07:52,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741911_1087 (size=70) 2024-12-01T17:07:52,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741912_1088 (size=70) 2024-12-01T17:07:52,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741911_1087 (size=70) 2024-12-01T17:07:52,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741912_1088 (size=70) 2024-12-01T17:07:52,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 2024-12-01T17:07:52,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 
2024-12-01T17:07:52,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-01T17:07:52,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-01T17:07:52,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-01T17:07:52,086 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:52,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-01T17:07:52,086 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:52,086 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:52,086 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:52,088 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cb2907d900a67f22cb4f023c397230bf in 169 msec 2024-12-01T17:07:52,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=50 2024-12-01T17:07:52,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b369e749f12bbc3534a96e5f94df1c66 in 169 msec 2024-12-01T17:07:52,091 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:07:52,092 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:07:52,092 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:07:52,093 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-01T17:07:52,093 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-01T17:07:52,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741913_1089 (size=549) 2024-12-01T17:07:52,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741913_1089 (size=549) 2024-12-01T17:07:52,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741913_1089 (size=549) 2024-12-01T17:07:52,104 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:07:52,110 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:07:52,111 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-01T17:07:52,112 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:07:52,112 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-01T17:07:52,114 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 207 msec 2024-12-01T17:07:52,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-01T17:07:52,221 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-01T17:07:52,229 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='0a549eb3cbc3fbc4d3cb287f1a3d9e7b0', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:07:52,230 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='1963285ab3788405f9310d890562a7aa1', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:07:52,232 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='2617008400f0a584c0bb45630429d02b6', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:07:52,233 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='30e16f2d31d4bb516b133673efa1b0c99', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:07:52,234 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='423f725ee4ce125e1458fb82e6f6e203b', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:07:52,234 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='5623a4bd86b18e2654142a7c011ad3f39', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:07:52,235 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:07:52,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44753 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:07:52,237 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57554, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:07:52,242 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36521 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:07:52,245 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-01T17:07:52,248 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-01T17:07:52,248 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 
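Annotation (not part of the captured log): the two "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are what HRegion emits when a client writes with durability set to skip the WAL. A minimal, hedged sketch of such a write, assuming the standard HBase client API; only the table name and the cf:q column come from the log, the row key and value are placeholders:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
      Put put = new Put(Bytes.toBytes("row-0")); // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skipping the WAL trades durability for speed; the region server then logs the
      // "writing data ... with WAL disabled" warning seen in the entries above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```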
2024-12-01T17:07:52,248 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:07:52,250 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-01T17:07:52,255 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-01T17:07:52,263 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-01T17:07:52,266 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-01T17:07:52,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072872266 (current time:1733072872266). 2024-12-01T17:07:52,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:07:52,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-01T17:07:52,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:07:52,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@159a5083, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:07:52,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:07:52,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:07:52,268 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:07:52,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:07:52,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:07:52,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@231c6e54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-12-01T17:07:52,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:07:52,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:07:52,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:07:52,269 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56546, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:07:52,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d03f78e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:07:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:07:52,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:07:52,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:07:52,272 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48784, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:07:52,274 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 
2024-12-01T17:07:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-01T17:07:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-01T17:07:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-01T17:07:52,274 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-01T17:07:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5df19b50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-01T17:07:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id
2024-12-01T17:07:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-01T17:07:52,276 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483'
2024-12-01T17:07:52,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-01T17:07:52,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483"
2024-12-01T17:07:52,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a56629e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-01T17:07:52,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:07:52,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:07:52,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:07:52,277 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56556, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:07:52,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ccda1b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:07:52,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:07:52,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:07:52,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:07:52,281 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48798, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:07:52,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:07:52,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:07:52,285 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57564, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:07:52,286 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 
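Annotation (not part of the captured log): the entries just below read an ACL of [jenkins: RWXCA] on the table while validating the snapshot request. As a hedged sketch of how such a table-level grant is typically made, assuming the AccessControlClient helper is available in this HBase version; the user name and the R/W/X/C/A actions come from the log, everything else is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantAclSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Grants READ/WRITE/EXEC/CREATE/ADMIN (the "RWXCA" shown in the log) to user
      // "jenkins" on the whole table; null family/qualifier means table-wide scope.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportWithTargetName"),
          "jenkins", (byte[]) null, (byte[]) null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```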
2024-12-01T17:07:52,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-01T17:07:52,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-01T17:07:52,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-01T17:07:52,286 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-01T17:07:52,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA]
2024-12-01T17:07:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
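Annotation (not part of the captured log): the "snapshot request for:{ ss=snaptb0-testExportWithTargetName ... type=FLUSH }" handled above is the server side of an Admin snapshot call. A minimal sketch of the client side, assuming the standard Admin API; snapshot and table names come from the log, the connection setup is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot first flushes each region's memstore (the HRegion
      // "Flushing ..." entries that follow), then records per-region manifests.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH));
      // The call returns once the master's SnapshotProcedure (pid=53 in the entries
      // that follow) has completed.
    }
  }
}
```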
2024-12-01T17:07:52,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-01T17:07:52,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-01T17:07:52,289 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:07:52,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-01T17:07:52,291 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:07:52,293 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:07:52,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741914_1090 (size=162) 2024-12-01T17:07:52,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741914_1090 (size=162) 2024-12-01T17:07:52,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741914_1090 (size=162) 2024-12-01T17:07:52,302 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:07:52,302 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb2907d900a67f22cb4f023c397230bf}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b369e749f12bbc3534a96e5f94df1c66}] 2024-12-01T17:07:52,303 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:52,303 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:52,337 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-01T17:07:52,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-01T17:07:52,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-01T17:07:52,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-01T17:07:52,457 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 2024-12-01T17:07:52,458 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 2024-12-01T17:07:52,458 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing cb2907d900a67f22cb4f023c397230bf 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-01T17:07:52,458 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing b369e749f12bbc3534a96e5f94df1c66 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-01T17:07:52,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/.tmp/cf/5c7b2db03e6e409281e25814fd97f139 is 71, key is 1bb26d7135290997e2378bc1df118779/cf:q/1733072872242/Put/seqid=0 2024-12-01T17:07:52,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/.tmp/cf/7351105358f741c595a8773c664ad2d1 is 69, key is 0a549eb3cbc3fbc4d3cb287f1a3d9e7b0/cf:q/1733072872236/Put/seqid=0 2024-12-01T17:07:52,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741915_1091 (size=8462) 2024-12-01T17:07:52,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741915_1091 (size=8462) 2024-12-01T17:07:52,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741915_1091 (size=8462) 2024-12-01T17:07:52,488 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/.tmp/cf/5c7b2db03e6e409281e25814fd97f139 2024-12-01T17:07:52,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741916_1092 (size=5149) 2024-12-01T17:07:52,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741916_1092 (size=5149) 2024-12-01T17:07:52,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741916_1092 (size=5149) 2024-12-01T17:07:52,497 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/.tmp/cf/7351105358f741c595a8773c664ad2d1 2024-12-01T17:07:52,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/.tmp/cf/7351105358f741c595a8773c664ad2d1 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/cf/7351105358f741c595a8773c664ad2d1 2024-12-01T17:07:52,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/.tmp/cf/5c7b2db03e6e409281e25814fd97f139 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/cf/5c7b2db03e6e409281e25814fd97f139 2024-12-01T17:07:52,510 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/cf/7351105358f741c595a8773c664ad2d1, entries=1, sequenceid=6, filesize=5.0 K 2024-12-01T17:07:52,510 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for cb2907d900a67f22cb4f023c397230bf in 52ms, sequenceid=6, compaction requested=false 2024-12-01T17:07:52,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for cb2907d900a67f22cb4f023c397230bf: 2024-12-01T17:07:52,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 
for snaptb0-testExportWithTargetName completed. 2024-12-01T17:07:52,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-01T17:07:52,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:07:52,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/cf/7351105358f741c595a8773c664ad2d1] hfiles 2024-12-01T17:07:52,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/cf/7351105358f741c595a8773c664ad2d1 for snapshot=snaptb0-testExportWithTargetName 2024-12-01T17:07:52,513 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/cf/5c7b2db03e6e409281e25814fd97f139, entries=49, sequenceid=6, filesize=8.3 K 2024-12-01T17:07:52,514 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for b369e749f12bbc3534a96e5f94df1c66 in 56ms, sequenceid=6, compaction requested=false 2024-12-01T17:07:52,514 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for b369e749f12bbc3534a96e5f94df1c66: 2024-12-01T17:07:52,514 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. for snaptb0-testExportWithTargetName completed. 2024-12-01T17:07:52,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-01T17:07:52,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:07:52,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/cf/5c7b2db03e6e409281e25814fd97f139] hfiles 2024-12-01T17:07:52,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/cf/5c7b2db03e6e409281e25814fd97f139 for snapshot=snaptb0-testExportWithTargetName 2024-12-01T17:07:52,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741917_1093 (size=109) 2024-12-01T17:07:52,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741917_1093 (size=109) 2024-12-01T17:07:52,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741917_1093 (size=109) 2024-12-01T17:07:52,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 
2024-12-01T17:07:52,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-01T17:07:52,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-01T17:07:52,525 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:52,525 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:07:52,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741918_1094 (size=109) 2024-12-01T17:07:52,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741918_1094 (size=109) 2024-12-01T17:07:52,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741918_1094 (size=109) 2024-12-01T17:07:52,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cb2907d900a67f22cb4f023c397230bf in 224 msec 2024-12-01T17:07:52,528 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 
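Annotation (not part of the captured log): once the snapshot procedure finishes in the entries below, the test exports it to a second HDFS location (the ExportSnapshot and TableMapReduceUtil entries later in the log). A rough, hedged sketch of driving such an export through the standard tool entry point; the snapshot and target names come from the log, while the destination URI and mapper count are placeholders:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // -target renames the snapshot at the destination, which is why the exported
    // directory seen later in the log ends in ".hbase-snapshot/.tmp/testExportWithTargetName"
    // rather than the source name "snaptb0-testExportWithTargetName".
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-copy-to", "hdfs://namenode:8020/hbase-export", // placeholder destination
        "-target", "testExportWithTargetName",
        "-mappers", "1"                                  // placeholder parallelism
    });
    System.exit(rc);
  }
}
```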
2024-12-01T17:07:52,528 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-01T17:07:52,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-01T17:07:52,529 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:52,529 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:07:52,532 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-12-01T17:07:52,532 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:07:52,532 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b369e749f12bbc3534a96e5f94df1c66 in 228 msec 2024-12-01T17:07:52,533 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:07:52,534 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:07:52,534 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-01T17:07:52,534 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-01T17:07:52,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741919_1095 (size=627) 2024-12-01T17:07:52,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741919_1095 (size=627) 2024-12-01T17:07:52,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741919_1095 (size=627) 2024-12-01T17:07:52,545 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:07:52,550 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:07:52,551 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-01T17:07:52,552 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:07:52,553 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-01T17:07:52,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 266 msec 2024-12-01T17:07:52,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-01T17:07:52,611 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-01T17:07:52,612 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072872612 2024-12-01T17:07:52,612 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44451, tgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072872612, rawTgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072872612, srcFsUri=hdfs://localhost:44451, srcDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:07:52,640 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44451, inputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:07:52,640 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072872612, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072872612/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-01T17:07:52,642 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status 
and integrity. 2024-12-01T17:07:52,647 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072872612/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-01T17:07:52,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741920_1096 (size=162) 2024-12-01T17:07:52,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741920_1096 (size=162) 2024-12-01T17:07:52,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741920_1096 (size=162) 2024-12-01T17:07:52,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741921_1097 (size=627) 2024-12-01T17:07:52,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741921_1097 (size=627) 2024-12-01T17:07:52,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741921_1097 (size=627) 2024-12-01T17:07:52,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741922_1098 (size=154) 2024-12-01T17:07:52,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741922_1098 (size=154) 2024-12-01T17:07:52,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741922_1098 (size=154) 2024-12-01T17:07:52,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:07:52,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:07:52,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:07:52,986 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:07:53,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-15225290680394381940.jar 2024-12-01T17:07:53,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:07:53,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:07:53,615 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-12213851234261755176.jar 2024-12-01T17:07:53,615 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:07:53,616 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:07:53,616 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:07:53,617 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:07:53,617 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:07:53,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:07:53,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-01T17:07:53,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-01T17:07:53,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-01T17:07:53,619 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): 
For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-01T17:07:53,619 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-01T17:07:53,619 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-01T17:07:53,620 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-01T17:07:53,620 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-01T17:07:53,620 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-01T17:07:53,620 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-01T17:07:53,621 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-01T17:07:53,621 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:07:53,621 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:07:53,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:07:53,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:07:53,622 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:07:53,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:07:53,623 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:07:53,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741923_1099 (size=24020) 2024-12-01T17:07:53,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741923_1099 (size=24020) 2024-12-01T17:07:53,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741923_1099 (size=24020) 2024-12-01T17:07:53,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741924_1100 (size=77755) 2024-12-01T17:07:53,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741924_1100 (size=77755) 2024-12-01T17:07:53,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741924_1100 (size=77755) 2024-12-01T17:07:53,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741925_1101 (size=131360) 2024-12-01T17:07:53,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741925_1101 (size=131360) 2024-12-01T17:07:53,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741925_1101 (size=131360) 2024-12-01T17:07:53,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741926_1102 (size=111793) 2024-12-01T17:07:53,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741926_1102 (size=111793) 2024-12-01T17:07:53,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741926_1102 (size=111793) 2024-12-01T17:07:53,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741927_1103 (size=1832290) 2024-12-01T17:07:53,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741927_1103 (size=1832290) 2024-12-01T17:07:53,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is 
added to blk_1073741927_1103 (size=1832290) 2024-12-01T17:07:53,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741928_1104 (size=8360005) 2024-12-01T17:07:53,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741928_1104 (size=8360005) 2024-12-01T17:07:53,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741928_1104 (size=8360005) 2024-12-01T17:07:53,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741929_1105 (size=503880) 2024-12-01T17:07:53,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741929_1105 (size=503880) 2024-12-01T17:07:53,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741929_1105 (size=503880) 2024-12-01T17:07:53,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741930_1106 (size=322274) 2024-12-01T17:07:53,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741930_1106 (size=322274) 2024-12-01T17:07:53,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741930_1106 (size=322274) 2024-12-01T17:07:53,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741931_1107 (size=20406) 2024-12-01T17:07:53,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741931_1107 (size=20406) 2024-12-01T17:07:53,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741931_1107 (size=20406) 2024-12-01T17:07:53,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741932_1108 (size=45609) 2024-12-01T17:07:53,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741932_1108 (size=45609) 2024-12-01T17:07:53,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741932_1108 (size=45609) 2024-12-01T17:07:53,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741933_1109 (size=6424742) 2024-12-01T17:07:53,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741933_1109 (size=6424742) 2024-12-01T17:07:53,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741933_1109 (size=6424742) 2024-12-01T17:07:53,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741934_1110 (size=136454) 2024-12-01T17:07:53,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36379 is added to blk_1073741934_1110 (size=136454) 2024-12-01T17:07:53,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741934_1110 (size=136454) 2024-12-01T17:07:53,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741935_1111 (size=1597136) 2024-12-01T17:07:53,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741935_1111 (size=1597136) 2024-12-01T17:07:53,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741935_1111 (size=1597136) 2024-12-01T17:07:53,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741936_1112 (size=30873) 2024-12-01T17:07:53,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741936_1112 (size=30873) 2024-12-01T17:07:53,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741936_1112 (size=30873) 2024-12-01T17:07:53,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741937_1113 (size=29229) 2024-12-01T17:07:53,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741937_1113 (size=29229) 2024-12-01T17:07:53,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741937_1113 (size=29229) 2024-12-01T17:07:53,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741938_1114 (size=903864) 2024-12-01T17:07:53,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741938_1114 (size=903864) 2024-12-01T17:07:53,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741938_1114 (size=903864) 2024-12-01T17:07:53,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741939_1115 (size=5175431) 2024-12-01T17:07:53,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741939_1115 (size=5175431) 2024-12-01T17:07:53,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741939_1115 (size=5175431) 2024-12-01T17:07:53,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741940_1116 (size=232881) 2024-12-01T17:07:53,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741940_1116 (size=232881) 2024-12-01T17:07:53,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741940_1116 (size=232881) 2024-12-01T17:07:53,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36379 is added to blk_1073741941_1117 (size=1323991) 2024-12-01T17:07:53,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741941_1117 (size=1323991) 2024-12-01T17:07:53,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741941_1117 (size=1323991) 2024-12-01T17:07:53,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741942_1118 (size=4695811) 2024-12-01T17:07:53,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741942_1118 (size=4695811) 2024-12-01T17:07:53,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741942_1118 (size=4695811) 2024-12-01T17:07:53,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741943_1119 (size=1877034) 2024-12-01T17:07:53,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741943_1119 (size=1877034) 2024-12-01T17:07:53,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741943_1119 (size=1877034) 2024-12-01T17:07:53,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741944_1120 (size=217555) 2024-12-01T17:07:53,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741944_1120 (size=217555) 2024-12-01T17:07:53,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741944_1120 (size=217555) 2024-12-01T17:07:53,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741945_1121 (size=4188619) 2024-12-01T17:07:53,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741945_1121 (size=4188619) 2024-12-01T17:07:53,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741945_1121 (size=4188619) 2024-12-01T17:07:53,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741946_1122 (size=127628) 2024-12-01T17:07:53,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741946_1122 (size=127628) 2024-12-01T17:07:53,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741946_1122 (size=127628) 2024-12-01T17:07:53,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741947_1123 (size=440956) 2024-12-01T17:07:53,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741947_1123 (size=440956) 2024-12-01T17:07:53,988 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741947_1123 (size=440956) 2024-12-01T17:07:53,989 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-01T17:07:53,992 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-01T17:07:53,994 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.3 K 2024-12-01T17:07:53,994 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.0 K 2024-12-01T17:07:54,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741948_1124 (size=445) 2024-12-01T17:07:54,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741948_1124 (size=445) 2024-12-01T17:07:54,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741948_1124 (size=445) 2024-12-01T17:07:54,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741949_1125 (size=21) 2024-12-01T17:07:54,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741949_1125 (size=21) 2024-12-01T17:07:54,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741949_1125 (size=21) 2024-12-01T17:07:54,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741950_1126 (size=304088) 2024-12-01T17:07:54,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741950_1126 (size=304088) 2024-12-01T17:07:54,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741950_1126 (size=304088) 2024-12-01T17:07:54,047 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-01T17:07:54,047 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-01T17:07:54,487 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0002_000001 (auth:SIMPLE) from 127.0.0.1:59908 2024-12-01T17:07:57,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-01T17:07:57,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-01T17:07:57,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:57,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-01T17:07:59,271 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0002_000001 (auth:SIMPLE) from 127.0.0.1:41456 2024-12-01T17:07:59,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741951_1127 (size=349786) 2024-12-01T17:07:59,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741951_1127 (size=349786) 2024-12-01T17:07:59,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741951_1127 (size=349786) 2024-12-01T17:08:01,497 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0002_000001 (auth:SIMPLE) from 127.0.0.1:59924 2024-12-01T17:08:01,497 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0002_000001 (auth:SIMPLE) from 127.0.0.1:40678 2024-12-01T17:08:02,834 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:08:05,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741952_1128 (size=8462) 2024-12-01T17:08:05,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741952_1128 (size=8462) 2024-12-01T17:08:05,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741952_1128 (size=8462) 2024-12-01T17:08:05,276 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0002/container_1733072744857_0002_01_000002/launch_container.sh] 2024-12-01T17:08:05,276 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0002/container_1733072744857_0002_01_000002/container_tokens] 2024-12-01T17:08:05,276 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0002/container_1733072744857_0002_01_000002/sysfs] 2024-12-01T17:08:05,858 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T17:08:06,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741954_1130 (size=5149) 2024-12-01T17:08:06,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741954_1130 (size=5149) 2024-12-01T17:08:06,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741954_1130 (size=5149) 2024-12-01T17:08:06,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741953_1129 (size=22163) 2024-12-01T17:08:06,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741953_1129 (size=22163) 2024-12-01T17:08:06,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741953_1129 (size=22163) 2024-12-01T17:08:06,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741955_1131 (size=464) 2024-12-01T17:08:06,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741955_1131 (size=464) 2024-12-01T17:08:06,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741955_1131 (size=464) 2024-12-01T17:08:06,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741956_1132 (size=22163) 2024-12-01T17:08:06,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741956_1132 (size=22163) 2024-12-01T17:08:06,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741956_1132 (size=22163) 2024-12-01T17:08:06,538 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0002/container_1733072744857_0002_01_000003/launch_container.sh] 2024-12-01T17:08:06,539 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0002/container_1733072744857_0002_01_000003/container_tokens] 2024-12-01T17:08:06,539 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0002/container_1733072744857_0002_01_000003/sysfs] 2024-12-01T17:08:06,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741957_1133 (size=349786) 2024-12-01T17:08:06,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741957_1133 (size=349786) 2024-12-01T17:08:06,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741957_1133 (size=349786) 2024-12-01T17:08:06,580 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0002_000001 (auth:SIMPLE) from 127.0.0.1:54410 2024-12-01T17:08:08,294 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-01T17:08:08,297 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-01T17:08:08,351 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-01T17:08:08,352 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-01T17:08:08,353 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-01T17:08:08,353 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-01T17:08:08,354 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-01T17:08:08,354 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-01T17:08:08,354 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072872612/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072872612/.hbase-snapshot/testExportWithTargetName 
2024-12-01T17:08:08,355 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072872612/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-01T17:08:08,355 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072872612/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-01T17:08:08,383 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportWithTargetName 2024-12-01T17:08:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-01T17:08:08,390 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072888389"}]},"ts":"1733072888389"} 2024-12-01T17:08:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-01T17:08:08,394 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-01T17:08:08,394 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-01T17:08:08,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-01T17:08:08,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cb2907d900a67f22cb4f023c397230bf, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b369e749f12bbc3534a96e5f94df1c66, UNASSIGN}] 2024-12-01T17:08:08,400 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b369e749f12bbc3534a96e5f94df1c66, UNASSIGN 2024-12-01T17:08:08,400 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cb2907d900a67f22cb4f023c397230bf, UNASSIGN 2024-12-01T17:08:08,402 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=cb2907d900a67f22cb4f023c397230bf, regionState=CLOSING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:08:08,402 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=b369e749f12bbc3534a96e5f94df1c66, regionState=CLOSING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:08:08,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cb2907d900a67f22cb4f023c397230bf, UNASSIGN because future has completed 2024-12-01T17:08:08,406 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:08:08,406 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure cb2907d900a67f22cb4f023c397230bf, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:08:08,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b369e749f12bbc3534a96e5f94df1c66, UNASSIGN because future has completed 2024-12-01T17:08:08,410 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:08:08,410 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure b369e749f12bbc3534a96e5f94df1c66, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:08:08,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-01T17:08:08,564 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(122): Close cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:08:08,565 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:08:08,565 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing cb2907d900a67f22cb4f023c397230bf, disabling compactions & flushes 2024-12-01T17:08:08,565 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 2024-12-01T17:08:08,565 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 2024-12-01T17:08:08,565 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. after waiting 0 ms 2024-12-01T17:08:08,565 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 
2024-12-01T17:08:08,595 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:08:08,595 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:08:08,595 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing b369e749f12bbc3534a96e5f94df1c66, disabling compactions & flushes 2024-12-01T17:08:08,596 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 2024-12-01T17:08:08,596 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 2024-12-01T17:08:08,596 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. after waiting 0 ms 2024-12-01T17:08:08,596 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 2024-12-01T17:08:08,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-01T17:08:09,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-01T17:08:09,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-01T17:08:10,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-01T17:08:11,796 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:08:11,796 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:08:11,797 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:08:11,798 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66. 
2024-12-01T17:08:11,798 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:08:11,798 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf. 2024-12-01T17:08:11,798 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for b369e749f12bbc3534a96e5f94df1c66: Waiting for close lock at 1733072888595Running coprocessor pre-close hooks at 1733072888595Disabling compacts and flushes for region at 1733072888595Disabling writes for close at 1733072888596 (+1 ms)Writing region close event to WAL at 1733072888616 (+20 ms)Running coprocessor post-close hooks at 1733072891797 (+3181 ms)Closed at 1733072891797 2024-12-01T17:08:11,798 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for cb2907d900a67f22cb4f023c397230bf: Waiting for close lock at 1733072888565Running coprocessor pre-close hooks at 1733072888565Disabling compacts and flushes for region at 1733072888565Disabling writes for close at 1733072888565Writing region close event to WAL at 1733072888612 (+47 ms)Running coprocessor post-close hooks at 1733072891797 (+3185 ms)Closed at 1733072891798 (+1 ms) 2024-12-01T17:08:11,805 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:45884 [Receiving block BP-371634539-172.17.0.3-1733072730982:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2029ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4/, blockId=1073741830, seqno=476 2024-12-01T17:08:11,806 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:48312 [Receiving block BP-371634539-172.17.0.3-1733072730982:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2028ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6/, blockId=1073741830, seqno=476 2024-12-01T17:08:11,808 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:36134 [Receiving block BP-371634539-172.17.0.3-1733072730982:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2031ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2/, blockId=1073741830, seqno=476 2024-12-01T17:08:11,808 INFO [AsyncFSWAL-0-hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData-prefix:b87883a7882d,38737,1733072736902 {}] wal.AbstractFSWAL(1368): Slow sync cost: 2031 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36379,DS-66fd9c32-3ad4-437a-8b5c-d300efc8d3c7,DISK], DatanodeInfoWithStorage[127.0.0.1:44603,DS-b6e128c0-a4bb-411c-90ad-04cc93436444,DISK], 
DatanodeInfoWithStorage[127.0.0.1:36291,DS-77e20430-c1a6-42bb-bd62-c245d594c058,DISK]] 2024-12-01T17:08:11,811 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:08:11,813 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=b369e749f12bbc3534a96e5f94df1c66, regionState=CLOSED 2024-12-01T17:08:11,815 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:08:11,824 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=cb2907d900a67f22cb4f023c397230bf, regionState=CLOSED 2024-12-01T17:08:11,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure b369e749f12bbc3534a96e5f94df1c66, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:08:11,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure cb2907d900a67f22cb4f023c397230bf, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:08:11,836 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-12-01T17:08:11,837 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure b369e749f12bbc3534a96e5f94df1c66, server=b87883a7882d,36521,1733072737896 in 3.4210 sec 2024-12-01T17:08:11,837 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-12-01T17:08:11,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure cb2907d900a67f22cb4f023c397230bf, server=b87883a7882d,44753,1733072737741 in 3.4260 sec 2024-12-01T17:08:11,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b369e749f12bbc3534a96e5f94df1c66, UNASSIGN in 3.4380 sec 2024-12-01T17:08:11,842 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=58, resume processing ppid=57 2024-12-01T17:08:11,843 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cb2907d900a67f22cb4f023c397230bf, UNASSIGN in 3.4400 sec 2024-12-01T17:08:11,847 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-01T17:08:11,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 3.4480 sec 2024-12-01T17:08:11,849 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072891849"}]},"ts":"1733072891849"} 2024-12-01T17:08:11,853 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): 
Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-01T17:08:11,853 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-01T17:08:11,859 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 3.4720 sec 2024-12-01T17:08:12,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-01T17:08:12,562 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-01T17:08:12,563 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportWithTargetName 2024-12-01T17:08:12,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-01T17:08:12,568 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-01T17:08:12,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-01T17:08:12,574 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-01T17:08:12,579 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-01T17:08:12,596 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:08:12,600 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/recovered.edits] 2024-12-01T17:08:12,611 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/cf/7351105358f741c595a8773c664ad2d1 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/cf/7351105358f741c595a8773c664ad2d1 2024-12-01T17:08:12,623 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:08:12,630 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf/recovered.edits/9.seqid 2024-12-01T17:08:12,632 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/cb2907d900a67f22cb4f023c397230bf 2024-12-01T17:08:12,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-01T17:08:12,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-01T17:08:12,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-01T17:08:12,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-01T17:08:12,636 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-01T17:08:12,636 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-01T17:08:12,637 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-01T17:08:12,637 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-01T17:08:12,639 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/recovered.edits] 2024-12-01T17:08:12,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-01T17:08:12,645 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:12,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-01T17:08:12,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-01T17:08:12,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:12,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:12,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-01T17:08:12,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:12,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-01T17:08:12,651 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/cf/5c7b2db03e6e409281e25814fd97f139 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/cf/5c7b2db03e6e409281e25814fd97f139 2024-12-01T17:08:12,687 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66/recovered.edits/9.seqid 2024-12-01T17:08:12,693 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithTargetName/b369e749f12bbc3534a96e5f94df1c66 2024-12-01T17:08:12,693 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-01T17:08:12,698 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; 
DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-01T17:08:12,705 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-01T17:08:12,712 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-01T17:08:12,715 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-01T17:08:12,715 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-01T17:08:12,715 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072892715"}]},"ts":"9223372036854775807"} 2024-12-01T17:08:12,715 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072892715"}]},"ts":"9223372036854775807"} 2024-12-01T17:08:12,720 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-01T17:08:12,720 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => cb2907d900a67f22cb4f023c397230bf, NAME => 'testtb-testExportWithTargetName,,1733072871239.cb2907d900a67f22cb4f023c397230bf.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => b369e749f12bbc3534a96e5f94df1c66, NAME => 'testtb-testExportWithTargetName,1,1733072871239.b369e749f12bbc3534a96e5f94df1c66.', STARTKEY => '1', ENDKEY => ''}] 2024-12-01T17:08:12,720 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
2024-12-01T17:08:12,721 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733072892720"}]},"ts":"9223372036854775807"} 2024-12-01T17:08:12,729 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-01T17:08:12,731 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-01T17:08:12,734 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 168 msec 2024-12-01T17:08:12,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-01T17:08:12,751 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-01T17:08:12,751 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-01T17:08:12,795 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0002_000001 (auth:SIMPLE) from 127.0.0.1:49908 2024-12-01T17:08:12,797 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-01T17:08:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-01T17:08:12,803 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-12-01T17:08:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-01T17:08:12,849 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0002/container_1733072744857_0002_01_000001/launch_container.sh] 2024-12-01T17:08:12,849 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0002/container_1733072744857_0002_01_000001/container_tokens] 2024-12-01T17:08:12,849 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0002/container_1733072744857_0002_01_000001/sysfs] 2024-12-01T17:08:12,861 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=793 (was 765) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:53778 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396781562) connection to localhost/127.0.0.1:35623 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:54122 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.offer(SynchronousQueue.java:857) app//org.apache.hadoop.ipc.Client$Connection.sendRpcRequest(Client.java:1187) app//org.apache.hadoop.ipc.Client.call(Client.java:1479) app//org.apache.hadoop.ipc.Client.call(Client.java:1426) app//org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) app//org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) app/jdk.proxy2/jdk.proxy2.$Proxy130.stopContainers(Unknown Source) app//org.apache.hadoop.yarn.api.impl.pb.client.ContainerManagementProtocolPBClientImpl.stopContainers(ContainerManagementProtocolPBClientImpl.java:147) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) 
java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) app//org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) app//org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) app//org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) app//org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) app/jdk.proxy2/jdk.proxy2.$Proxy131.stopContainers(Unknown Source) app//org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher.cleanup(AMLauncher.java:150) app//org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher.run(AMLauncher.java:351) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38841 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35623 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396781562) 
connection to localhost/127.0.0.1:39961 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39961 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396781562) connection to localhost/127.0.0.1:46051 from appattempt_1733072744857_0002_000001 java.base@17.0.11/java.util.Collections$SetFromMap.<init>(Collections.java:5677) java.base@17.0.11/java.util.Collections.newSetFromMap(Collections.java:5660) java.base@17.0.11/jdk.internal.misc.TerminatingThreadLocal$1.initialValue(TerminatingThreadLocal.java:102) java.base@17.0.11/jdk.internal.misc.TerminatingThreadLocal$1.initialValue(TerminatingThreadLocal.java:99) java.base@17.0.11/java.lang.ThreadLocal.setInitialValue(ThreadLocal.java:195) java.base@17.0.11/java.lang.ThreadLocal.get(ThreadLocal.java:172) java.base@17.0.11/jdk.internal.misc.TerminatingThreadLocal.register(TerminatingThreadLocal.java:82) java.base@17.0.11/java.lang.ThreadLocal.setInitialValue(ThreadLocal.java:204) java.base@17.0.11/java.lang.ThreadLocal.get(ThreadLocal.java:172) java.base@17.0.11/sun.nio.ch.Util.getTemporaryDirectBuffer(Util.java:231) java.base@17.0.11/sun.nio.ch.IOUtil.read(IOUtil.java:293) java.base@17.0.11/sun.nio.ch.IOUtil.read(IOUtil.java:259) java.base@17.0.11/sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:417) app//org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.FilterInputStream.read(FilterInputStream.java:132) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.FilterInputStream.read(FilterInputStream.java:82) java.base@17.0.11/java.io.FilterInputStream.read(FilterInputStream.java:82) app//org.apache.hadoop.ipc.Client$Connection$PingInputStream.read(Client.java:518) java.base@17.0.11/java.io.DataInputStream.readInt(DataInputStream.java:381) app//org.apache.hadoop.ipc.Client$IpcStreams.readResponse(Client.java:1923) app//org.apache.hadoop.ipc.Client$Connection.receiveRpcResponse(Client.java:1203) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1094) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=802 (was 791) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=576 (was 450) - SystemLoadAverage LEAK? -, ProcessCount=27 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=3632 (was 2361) - AvailableMemoryMB LEAK? 
- 2024-12-01T17:08:12,862 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-12-01T17:08:12,883 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=795, OpenFileDescriptor=802, MaxFileDescriptor=1048576, SystemLoadAverage=576, ProcessCount=28, AvailableMemoryMB=3631 2024-12-01T17:08:12,883 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-01T17:08:12,885 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:08:12,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-01T17:08:12,888 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:08:12,888 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:08:12,889 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-12-01T17:08:12,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-01T17:08:12,890 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:08:12,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741958_1134 (size=404) 2024-12-01T17:08:12,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741958_1134 (size=404) 2024-12-01T17:08:12,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741958_1134 (size=404) 2024-12-01T17:08:12,974 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => a6aa36629f27a19a8436f2bd4251fc80, NAME => 'testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:08:12,979 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c8a2698e1c1b6f84eec48bde35425d3b, NAME => 'testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:08:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-01T17:08:13,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741959_1135 (size=65) 2024-12-01T17:08:13,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741959_1135 (size=65) 2024-12-01T17:08:13,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741959_1135 (size=65) 2024-12-01T17:08:13,087 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:13,087 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing a6aa36629f27a19a8436f2bd4251fc80, disabling compactions & flushes 2024-12-01T17:08:13,087 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 2024-12-01T17:08:13,087 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 2024-12-01T17:08:13,087 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. after waiting 0 ms 2024-12-01T17:08:13,087 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 2024-12-01T17:08:13,087 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 
2024-12-01T17:08:13,087 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for a6aa36629f27a19a8436f2bd4251fc80: Waiting for close lock at 1733072893087Disabling compacts and flushes for region at 1733072893087Disabling writes for close at 1733072893087Writing region close event to WAL at 1733072893087Closed at 1733072893087 2024-12-01T17:08:13,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741960_1136 (size=65) 2024-12-01T17:08:13,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741960_1136 (size=65) 2024-12-01T17:08:13,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741960_1136 (size=65) 2024-12-01T17:08:13,126 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:13,126 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing c8a2698e1c1b6f84eec48bde35425d3b, disabling compactions & flushes 2024-12-01T17:08:13,126 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 2024-12-01T17:08:13,126 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 2024-12-01T17:08:13,126 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. after waiting 0 ms 2024-12-01T17:08:13,126 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 2024-12-01T17:08:13,127 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 
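The CreateTableProcedure (pid=63) logged above carries the full descriptor for testtb-testExportWithResetTtl: REGION_REPLICATION => '1', the 'hbase.store.file-tracker.impl' => 'DEFAULT' metadata, and a single family 'cf' with one version, ROW bloom filter, 64 KB block size and no TTL; the table is pre-split at '1', which is why two regions are created on the filesystem and immediately closed again before being added to hbase:meta. A rough client-side equivalent, offered as an illustrative sketch rather than the test's actual code, could be:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTestTable {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
            .setRegionReplication(1)
            // Mirrors the METADATA entry shown in the region-creation records above.
            .setValue("hbase.store.file-tracker.impl", "DEFAULT")
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build())
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // One split key at '1' yields the two regions seen above: ('' .. '1') and ('1' .. '').
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }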
2024-12-01T17:08:13,127 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for c8a2698e1c1b6f84eec48bde35425d3b: Waiting for close lock at 1733072893126Disabling compacts and flushes for region at 1733072893126Disabling writes for close at 1733072893126Writing region close event to WAL at 1733072893126Closed at 1733072893126 2024-12-01T17:08:13,128 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:08:13,129 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733072893128"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072893128"}]},"ts":"1733072893128"} 2024-12-01T17:08:13,129 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733072893128"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072893128"}]},"ts":"1733072893128"} 2024-12-01T17:08:13,133 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-01T17:08:13,134 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:08:13,134 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072893134"}]},"ts":"1733072893134"} 2024-12-01T17:08:13,144 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-01T17:08:13,145 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:08:13,147 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:08:13,147 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:08:13,147 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:08:13,147 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:08:13,147 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:08:13,147 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:08:13,147 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:08:13,147 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:08:13,147 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:08:13,147 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:08:13,148 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c8a2698e1c1b6f84eec48bde35425d3b, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a6aa36629f27a19a8436f2bd4251fc80, ASSIGN}] 2024-12-01T17:08:13,150 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a6aa36629f27a19a8436f2bd4251fc80, ASSIGN 2024-12-01T17:08:13,151 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c8a2698e1c1b6f84eec48bde35425d3b, ASSIGN 2024-12-01T17:08:13,152 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a6aa36629f27a19a8436f2bd4251fc80, ASSIGN; state=OFFLINE, location=b87883a7882d,41375,1733072737967; forceNewPlan=false, retain=false 2024-12-01T17:08:13,152 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c8a2698e1c1b6f84eec48bde35425d3b, ASSIGN; state=OFFLINE, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:08:13,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-01T17:08:13,304 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
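At this point the balancer has enumerated one host, one rack and three servers, round-robin assignment has picked a target for each region, and the two TransitRegionStateProcedures (pid=64 and pid=65) start moving the regions from OFFLINE to OPEN on ports 36521 and 41375. To confirm from the client where the regions ended up once assignment completes, a small check along these lines works (a hypothetical snippet, not part of the test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowRegionLocations {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(table)) {
          // Reads the region locations from hbase:meta and prints each encoded region
          // name with its hosting server (36521 and 41375 in this run).
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }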
2024-12-01T17:08:13,304 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=a6aa36629f27a19a8436f2bd4251fc80, regionState=OPENING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:08:13,305 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=c8a2698e1c1b6f84eec48bde35425d3b, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:08:13,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c8a2698e1c1b6f84eec48bde35425d3b, ASSIGN because future has completed 2024-12-01T17:08:13,310 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:08:13,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a6aa36629f27a19a8436f2bd4251fc80, ASSIGN because future has completed 2024-12-01T17:08:13,346 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure a6aa36629f27a19a8436f2bd4251fc80, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:08:13,508 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 2024-12-01T17:08:13,508 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => c8a2698e1c1b6f84eec48bde35425d3b, NAME => 'testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:08:13,508 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. service=AccessControlService 2024-12-01T17:08:13,509 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:08:13,509 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:13,509 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:13,509 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:13,510 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:13,514 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 2024-12-01T17:08:13,515 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => a6aa36629f27a19a8436f2bd4251fc80, NAME => 'testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T17:08:13,515 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. service=AccessControlService 2024-12-01T17:08:13,515 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
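Both open handlers register the AccessControlService coprocessor endpoint for their region, with org.apache.hadoop.hbase.security.access.AccessController loaded as a system coprocessor; this is the component that will enforce the 'jenkins: RWXCA' entry written for this table a few records further down. Granting such a table-level ACL from a client normally goes through AccessControlClient; a minimal sketch under the same connection assumptions as the earlier examples:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTablePermissions {
      // AccessControlClient.grant declares "throws Throwable", so main does as well.
      public static void main(String[] args) throws Throwable {
        TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Table-wide grant (null family/qualifier) of READ, WRITE, EXEC, CREATE, ADMIN
          // for user "jenkins", i.e. an RWXCA entry like the one logged below.
          AccessControlClient.grant(conn, table, "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }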
2024-12-01T17:08:13,516 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:13,516 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:13,516 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:13,516 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:13,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-01T17:08:13,531 INFO [StoreOpener-c8a2698e1c1b6f84eec48bde35425d3b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:13,535 INFO [StoreOpener-c8a2698e1c1b6f84eec48bde35425d3b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8a2698e1c1b6f84eec48bde35425d3b columnFamilyName cf 2024-12-01T17:08:13,536 DEBUG [StoreOpener-c8a2698e1c1b6f84eec48bde35425d3b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:08:13,536 INFO [StoreOpener-a6aa36629f27a19a8436f2bd4251fc80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:13,537 INFO [StoreOpener-c8a2698e1c1b6f84eec48bde35425d3b-1 {}] regionserver.HStore(327): Store=c8a2698e1c1b6f84eec48bde35425d3b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:08:13,537 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:13,538 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, 
pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:13,539 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:13,539 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:13,539 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:13,539 INFO [StoreOpener-a6aa36629f27a19a8436f2bd4251fc80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a6aa36629f27a19a8436f2bd4251fc80 columnFamilyName cf 2024-12-01T17:08:13,540 DEBUG [StoreOpener-a6aa36629f27a19a8436f2bd4251fc80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:08:13,541 INFO [StoreOpener-a6aa36629f27a19a8436f2bd4251fc80-1 {}] regionserver.HStore(327): Store=a6aa36629f27a19a8436f2bd4251fc80/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:08:13,541 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:13,541 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:13,542 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:13,542 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:13,543 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 
a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:13,544 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:13,546 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:13,562 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:08:13,563 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened c8a2698e1c1b6f84eec48bde35425d3b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63297357, jitterRate=-0.0567958801984787}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:08:13,563 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:13,564 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for c8a2698e1c1b6f84eec48bde35425d3b: Running coprocessor pre-open hook at 1733072893510Writing region info on filesystem at 1733072893510Initializing all the Stores at 1733072893521 (+11 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072893521Cleaning up temporary data from old regions at 1733072893539 (+18 ms)Running coprocessor post-open hooks at 1733072893563 (+24 ms)Region opened successfully at 1733072893564 (+1 ms) 2024-12-01T17:08:13,567 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b., pid=66, masterSystemTime=1733072893503 2024-12-01T17:08:13,572 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=c8a2698e1c1b6f84eec48bde35425d3b, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:08:13,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:08:13,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=64 2024-12-01T17:08:13,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b, server=b87883a7882d,36521,1733072737896 in 268 
msec 2024-12-01T17:08:13,582 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:08:13,583 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened a6aa36629f27a19a8436f2bd4251fc80; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73387415, jitterRate=0.0935577005147934}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:08:13,583 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:13,584 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for a6aa36629f27a19a8436f2bd4251fc80: Running coprocessor pre-open hook at 1733072893516Writing region info on filesystem at 1733072893516Initializing all the Stores at 1733072893521 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072893521Cleaning up temporary data from old regions at 1733072893544 (+23 ms)Running coprocessor post-open hooks at 1733072893583 (+39 ms)Region opened successfully at 1733072893584 (+1 ms) 2024-12-01T17:08:13,584 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 2024-12-01T17:08:13,584 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 2024-12-01T17:08:13,585 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80., pid=67, masterSystemTime=1733072893507 2024-12-01T17:08:13,588 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c8a2698e1c1b6f84eec48bde35425d3b, ASSIGN in 434 msec 2024-12-01T17:08:13,589 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 2024-12-01T17:08:13,589 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 
2024-12-01T17:08:13,590 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=a6aa36629f27a19a8436f2bd4251fc80, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:08:13,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure a6aa36629f27a19a8436f2bd4251fc80, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:08:13,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=65 2024-12-01T17:08:13,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure a6aa36629f27a19a8436f2bd4251fc80, server=b87883a7882d,41375,1733072737967 in 250 msec 2024-12-01T17:08:13,612 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-12-01T17:08:13,612 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a6aa36629f27a19a8436f2bd4251fc80, ASSIGN in 453 msec 2024-12-01T17:08:13,613 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:08:13,614 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072893613"}]},"ts":"1733072893613"} 2024-12-01T17:08:13,617 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-01T17:08:13,618 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:08:13,619 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-01T17:08:13,633 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-01T17:08:13,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:13,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:13,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:13,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:13,667 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:13,667 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:13,668 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:13,668 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:13,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 782 msec 2024-12-01T17:08:14,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-01T17:08:14,033 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-01T17:08:14,033 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-01T17:08:14,034 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:08:14,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-01T17:08:14,040 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:08:14,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 2024-12-01T17:08:14,040 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-01T17:08:14,046 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-01T17:08:14,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072894046 (current time:1733072894046). 
2024-12-01T17:08:14,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:08:14,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-01T17:08:14,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:08:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eaa8415, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:14,061 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:14,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:14,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:14,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cc5cd48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:14,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:14,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:14,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:14,065 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57018, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:14,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e324a1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:14,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:14,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:14,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:14,071 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54126, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:14,073 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:08:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:14,073 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:08:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:14,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39ee7991, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:14,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:14,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:14,084 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:14,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:14,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:14,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79c338e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:14,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:14,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:14,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:14,088 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57046, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:14,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1879bbe0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:14,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:14,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:14,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:14,094 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54132, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:14,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:14,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:14,099 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34770, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:14,102 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:08:14,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) 
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:14,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:14,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:14,103 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:08:14,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-01T17:08:14,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:08:14,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-01T17:08:14,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-01T17:08:14,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-01T17:08:14,114 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:08:14,115 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:08:14,119 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:08:14,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741961_1137 (size=161) 2024-12-01T17:08:14,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741961_1137 (size=161) 2024-12-01T17:08:14,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741961_1137 (size=161) 2024-12-01T17:08:14,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-01T17:08:14,430 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-01T17:08:14,574 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:08:14,574 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a6aa36629f27a19a8436f2bd4251fc80}] 2024-12-01T17:08:14,575 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:14,575 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:14,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-01T17:08:14,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41375 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-01T17:08:14,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 2024-12-01T17:08:14,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for a6aa36629f27a19a8436f2bd4251fc80: 2024-12-01T17:08:14,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-01T17:08:14,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-01T17:08:14,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:14,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:08:14,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 
2024-12-01T17:08:14,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for c8a2698e1c1b6f84eec48bde35425d3b: 2024-12-01T17:08:14,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-01T17:08:14,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-01T17:08:14,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:14,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:08:14,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-01T17:08:14,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741963_1139 (size=68) 2024-12-01T17:08:14,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741962_1138 (size=68) 2024-12-01T17:08:14,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741962_1138 (size=68) 2024-12-01T17:08:14,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741962_1138 (size=68) 2024-12-01T17:08:14,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741963_1139 (size=68) 2024-12-01T17:08:14,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 2024-12-01T17:08:14,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-01T17:08:14,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741963_1139 (size=68) 2024-12-01T17:08:14,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 
2024-12-01T17:08:14,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-01T17:08:14,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-01T17:08:14,803 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:14,803 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:14,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-01T17:08:14,803 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:14,803 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:14,810 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b in 231 msec 2024-12-01T17:08:14,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=68 2024-12-01T17:08:14,811 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:08:14,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a6aa36629f27a19a8436f2bd4251fc80 in 231 msec 2024-12-01T17:08:14,812 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:08:14,813 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:08:14,813 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-01T17:08:14,814 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-01T17:08:14,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to 
blk_1073741964_1140 (size=543) 2024-12-01T17:08:14,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741964_1140 (size=543) 2024-12-01T17:08:14,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741964_1140 (size=543) 2024-12-01T17:08:14,832 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:08:14,837 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:08:14,838 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-01T17:08:14,839 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:08:14,839 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-01T17:08:14,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 735 msec 2024-12-01T17:08:15,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-01T17:08:15,251 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-01T17:08:15,257 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='0aad962a42adbe8b4853e2953de628129', locateType=CURRENT is [region=testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:15,259 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='1753902841e20204e77181b0b92d08f32', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:08:15,261 DEBUG 
[RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='217ceeb2578f8fb1d69dcb852e1f12883', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:08:15,262 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='3431c6ff9f38ca96d4dca5abe2b107c84', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:08:15,262 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='46e50ec61757562f461500eee6d3bb1d1', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:08:15,265 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47536, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:15,265 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36521 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:08:15,267 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:08:15,269 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-01T17:08:15,271 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-01T17:08:15,271 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 
2024-12-01T17:08:15,272 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:08:15,273 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-01T17:08:15,279 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-01T17:08:15,286 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-01T17:08:15,288 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-01T17:08:15,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072895288 (current time:1733072895288). 2024-12-01T17:08:15,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:08:15,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-01T17:08:15,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:08:15,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@666a5d8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:15,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:15,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:15,290 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:15,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:15,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:15,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1adfe4f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-01T17:08:15,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:15,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:15,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:15,291 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57070, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:15,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d1c499d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:15,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:15,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:15,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:15,294 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54134, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:15,295 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 
2024-12-01T17:08:15,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:15,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:15,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:15,296 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:08:15,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26e7f024, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:15,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:15,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:15,297 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:15,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:15,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:15,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61808e48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:15,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:15,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:15,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:15,299 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57090, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:15,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f9d47b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:15,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:15,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:15,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:15,301 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54136, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:15,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:15,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:15,304 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34786, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:15,305 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 
2024-12-01T17:08:15,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:15,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:15,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:15,305 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:08:15,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-01T17:08:15,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-01T17:08:15,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-01T17:08:15,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-01T17:08:15,308 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:08:15,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-01T17:08:15,309 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:08:15,311 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:08:15,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741965_1141 (size=156) 2024-12-01T17:08:15,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741965_1141 (size=156) 2024-12-01T17:08:15,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741965_1141 (size=156) 2024-12-01T17:08:15,320 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:08:15,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a6aa36629f27a19a8436f2bd4251fc80}] 2024-12-01T17:08:15,321 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:15,321 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:15,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-01T17:08:15,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41375 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-01T17:08:15,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 2024-12-01T17:08:15,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-01T17:08:15,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 2024-12-01T17:08:15,474 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing c8a2698e1c1b6f84eec48bde35425d3b 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-01T17:08:15,474 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing a6aa36629f27a19a8436f2bd4251fc80 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-01T17:08:15,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/.tmp/cf/06862b1efad24e01aea4a300e6ea10c3 is 71, key is 03eef8fdeac04d8a2da726d766663a77/cf:q/1733072895265/Put/seqid=0 2024-12-01T17:08:15,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/.tmp/cf/a46e25421ffd43369dc339e75260127c is 71, key is 102d55652e8fc4a36afadc1e209bf238/cf:q/1733072895267/Put/seqid=0 2024-12-01T17:08:15,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741966_1142 (size=5356) 2024-12-01T17:08:15,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741966_1142 (size=5356) 2024-12-01T17:08:15,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741966_1142 (size=5356) 2024-12-01T17:08:15,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741967_1143 (size=8256) 2024-12-01T17:08:15,497 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/.tmp/cf/06862b1efad24e01aea4a300e6ea10c3 2024-12-01T17:08:15,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741967_1143 (size=8256) 2024-12-01T17:08:15,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741967_1143 (size=8256) 2024-12-01T17:08:15,498 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/.tmp/cf/a46e25421ffd43369dc339e75260127c 2024-12-01T17:08:15,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/.tmp/cf/06862b1efad24e01aea4a300e6ea10c3 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/cf/06862b1efad24e01aea4a300e6ea10c3 2024-12-01T17:08:15,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/.tmp/cf/a46e25421ffd43369dc339e75260127c as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/cf/a46e25421ffd43369dc339e75260127c 2024-12-01T17:08:15,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/cf/a46e25421ffd43369dc339e75260127c, entries=46, sequenceid=6, filesize=8.1 K 2024-12-01T17:08:15,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/cf/06862b1efad24e01aea4a300e6ea10c3, entries=4, sequenceid=6, filesize=5.2 K 2024-12-01T17:08:15,509 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for c8a2698e1c1b6f84eec48bde35425d3b in 35ms, sequenceid=6, compaction requested=false 2024-12-01T17:08:15,509 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for a6aa36629f27a19a8436f2bd4251fc80 in 
36ms, sequenceid=6, compaction requested=false 2024-12-01T17:08:15,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-01T17:08:15,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-01T17:08:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for a6aa36629f27a19a8436f2bd4251fc80: 2024-12-01T17:08:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for c8a2698e1c1b6f84eec48bde35425d3b: 2024-12-01T17:08:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. for snaptb0-testExportWithResetTtl completed. 2024-12-01T17:08:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. for snaptb0-testExportWithResetTtl completed. 2024-12-01T17:08:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-01T17:08:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-01T17:08:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/cf/a46e25421ffd43369dc339e75260127c] hfiles 2024-12-01T17:08:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/cf/06862b1efad24e01aea4a300e6ea10c3] hfiles 2024-12-01T17:08:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/cf/a46e25421ffd43369dc339e75260127c for snapshot=snaptb0-testExportWithResetTtl 2024-12-01T17:08:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/cf/06862b1efad24e01aea4a300e6ea10c3 for snapshot=snaptb0-testExportWithResetTtl 2024-12-01T17:08:15,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741969_1145 (size=107) 2024-12-01T17:08:15,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741969_1145 (size=107) 2024-12-01T17:08:15,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741969_1145 (size=107) 2024-12-01T17:08:15,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 
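[Editor's note] Because the snapshot type is FLUSH, each SnapshotRegionProcedure above first flushes the region's memstore to a new hfile (the DefaultStoreFlusher/HStore lines) and then only records a reference to that file in the snapshot manifest; no data is copied. The flush itself is the same operation a client can trigger explicitly. Illustrative fragment, reusing the `admin` handle from the sketch above:

```java
// Illustrative only: the FLUSH-type snapshot performs an equivalent flush
// internally for every region before adding hfile references to the manifest.
admin.flush(TableName.valueOf("testtb-testExportWithResetTtl"));
```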
2024-12-01T17:08:15,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-01T17:08:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-01T17:08:15,521 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:15,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741968_1144 (size=107) 2024-12-01T17:08:15,521 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:15,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741968_1144 (size=107) 2024-12-01T17:08:15,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741968_1144 (size=107) 2024-12-01T17:08:15,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 2024-12-01T17:08:15,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-01T17:08:15,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-01T17:08:15,523 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:15,523 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:15,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b in 202 msec 2024-12-01T17:08:15,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-12-01T17:08:15,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a6aa36629f27a19a8436f2bd4251fc80 in 203 msec 2024-12-01T17:08:15,525 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:08:15,526 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl 
table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:08:15,526 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:08:15,526 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-01T17:08:15,527 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-01T17:08:15,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741970_1146 (size=621) 2024-12-01T17:08:15,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741970_1146 (size=621) 2024-12-01T17:08:15,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741970_1146 (size=621) 2024-12-01T17:08:15,538 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:08:15,543 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:08:15,543 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-01T17:08:15,545 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:08:15,545 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-01T17:08:15,547 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 239 msec 2024-12-01T17:08:15,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-01T17:08:15,621 INFO 
[RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-01T17:08:15,624 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:08:15,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-01T17:08:15,630 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:08:15,630 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:08:15,630 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-01T17:08:15,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-01T17:08:15,632 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:08:15,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741971_1147 (size=397) 2024-12-01T17:08:15,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741971_1147 (size=397) 2024-12-01T17:08:15,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741971_1147 (size=397) 2024-12-01T17:08:15,646 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 87c35a61769cdc3e9a78a17b2c2c9818, NAME => 'testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:08:15,646 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): 
creating {ENCODED => 464d87381b65c6f1ab954340c4b06d00, NAME => 'testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:08:15,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741972_1148 (size=58) 2024-12-01T17:08:15,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741973_1149 (size=58) 2024-12-01T17:08:15,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741972_1148 (size=58) 2024-12-01T17:08:15,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741973_1149 (size=58) 2024-12-01T17:08:15,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741972_1148 (size=58) 2024-12-01T17:08:15,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741973_1149 (size=58) 2024-12-01T17:08:15,657 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:15,657 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:15,658 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 464d87381b65c6f1ab954340c4b06d00, disabling compactions & flushes 2024-12-01T17:08:15,658 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 87c35a61769cdc3e9a78a17b2c2c9818, disabling compactions & flushes 2024-12-01T17:08:15,658 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 2024-12-01T17:08:15,658 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 2024-12-01T17:08:15,658 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 
2024-12-01T17:08:15,658 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 2024-12-01T17:08:15,658 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. after waiting 0 ms 2024-12-01T17:08:15,658 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. after waiting 0 ms 2024-12-01T17:08:15,658 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 2024-12-01T17:08:15,658 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 2024-12-01T17:08:15,658 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 2024-12-01T17:08:15,658 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 2024-12-01T17:08:15,658 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 464d87381b65c6f1ab954340c4b06d00: Waiting for close lock at 1733072895657Disabling compacts and flushes for region at 1733072895657Disabling writes for close at 1733072895658 (+1 ms)Writing region close event to WAL at 1733072895658Closed at 1733072895658 2024-12-01T17:08:15,658 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 87c35a61769cdc3e9a78a17b2c2c9818: Waiting for close lock at 1733072895657Disabling compacts and flushes for region at 1733072895657Disabling writes for close at 1733072895658 (+1 ms)Writing region close event to WAL at 1733072895658Closed at 1733072895658 2024-12-01T17:08:15,659 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:08:15,659 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733072895659"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072895659"}]},"ts":"1733072895659"} 2024-12-01T17:08:15,659 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733072895659"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072895659"}]},"ts":"1733072895659"} 2024-12-01T17:08:15,662 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
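[Editor's note] The CreateTableProcedure above (pid=74) writes the filesystem layout for two regions split at row '1' and adds them to hbase:meta. A sketch of a client call that produces an equivalent table follows; the column family, VERSIONS setting, and split point are taken from the logged descriptor, while the surrounding `admin` handle is assumed from the first sketch.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Column family 'cf' with VERSIONS => '1', matching the descriptor in the log.
TableDescriptorBuilder builder = TableDescriptorBuilder
    .newBuilder(TableName.valueOf("testExportWithResetTtl"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(1)
        .build());
// One split key '1' yields the two regions ('', '1') and ('1', '') created above.
admin.createTable(builder.build(), new byte[][] { Bytes.toBytes("1") });
```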
2024-12-01T17:08:15,663 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:08:15,663 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072895663"}]},"ts":"1733072895663"} 2024-12-01T17:08:15,665 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-01T17:08:15,665 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:08:15,667 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:08:15,667 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:08:15,667 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:08:15,667 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:08:15,667 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:08:15,667 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:08:15,667 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:08:15,667 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:08:15,667 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:08:15,667 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:08:15,667 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=87c35a61769cdc3e9a78a17b2c2c9818, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=464d87381b65c6f1ab954340c4b06d00, ASSIGN}] 2024-12-01T17:08:15,668 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=464d87381b65c6f1ab954340c4b06d00, ASSIGN 2024-12-01T17:08:15,668 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=87c35a61769cdc3e9a78a17b2c2c9818, ASSIGN 2024-12-01T17:08:15,669 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=464d87381b65c6f1ab954340c4b06d00, ASSIGN; state=OFFLINE, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:08:15,669 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=87c35a61769cdc3e9a78a17b2c2c9818, ASSIGN; state=OFFLINE, location=b87883a7882d,44753,1733072737741; forceNewPlan=false, retain=false 2024-12-01T17:08:15,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-01T17:08:15,820 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-01T17:08:15,820 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=87c35a61769cdc3e9a78a17b2c2c9818, regionState=OPENING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:08:15,820 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=464d87381b65c6f1ab954340c4b06d00, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:08:15,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=87c35a61769cdc3e9a78a17b2c2c9818, ASSIGN because future has completed 2024-12-01T17:08:15,822 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87c35a61769cdc3e9a78a17b2c2c9818, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:08:15,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=464d87381b65c6f1ab954340c4b06d00, ASSIGN because future has completed 2024-12-01T17:08:15,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 464d87381b65c6f1ab954340c4b06d00, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:08:15,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-01T17:08:15,979 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 2024-12-01T17:08:15,979 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => 87c35a61769cdc3e9a78a17b2c2c9818, NAME => 'testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:08:15,980 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. service=AccessControlService 2024-12-01T17:08:15,980 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 
2024-12-01T17:08:15,980 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:08:15,980 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => 464d87381b65c6f1ab954340c4b06d00, NAME => 'testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T17:08:15,981 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:15,981 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:15,981 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. service=AccessControlService 2024-12-01T17:08:15,981 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:15,981 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:15,981 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:08:15,981 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:15,981 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:15,982 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:15,982 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:15,983 INFO [StoreOpener-87c35a61769cdc3e9a78a17b2c2c9818-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:15,984 INFO [StoreOpener-464d87381b65c6f1ab954340c4b06d00-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:15,986 INFO [StoreOpener-87c35a61769cdc3e9a78a17b2c2c9818-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 87c35a61769cdc3e9a78a17b2c2c9818 columnFamilyName cf 2024-12-01T17:08:15,986 DEBUG [StoreOpener-87c35a61769cdc3e9a78a17b2c2c9818-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:08:15,986 INFO [StoreOpener-464d87381b65c6f1ab954340c4b06d00-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 464d87381b65c6f1ab954340c4b06d00 
columnFamilyName cf 2024-12-01T17:08:15,986 INFO [StoreOpener-87c35a61769cdc3e9a78a17b2c2c9818-1 {}] regionserver.HStore(327): Store=87c35a61769cdc3e9a78a17b2c2c9818/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:08:15,986 DEBUG [StoreOpener-464d87381b65c6f1ab954340c4b06d00-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:08:15,987 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:15,987 INFO [StoreOpener-464d87381b65c6f1ab954340c4b06d00-1 {}] regionserver.HStore(327): Store=464d87381b65c6f1ab954340c4b06d00/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:08:15,988 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:15,988 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:15,988 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:15,988 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:15,989 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:15,989 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:15,989 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:15,989 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:15,989 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:15,990 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 
87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:15,991 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:15,992 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:08:15,993 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened 87c35a61769cdc3e9a78a17b2c2c9818; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62743963, jitterRate=-0.06504209339618683}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:08:15,993 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:15,993 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:08:15,993 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened 464d87381b65c6f1ab954340c4b06d00; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62459903, jitterRate=-0.0692749172449112}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:08:15,993 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for 87c35a61769cdc3e9a78a17b2c2c9818: Running coprocessor pre-open hook at 1733072895981Writing region info on filesystem at 1733072895981Initializing all the Stores at 1733072895983 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072895983Cleaning up temporary data from old regions at 1733072895989 (+6 ms)Running coprocessor post-open hooks at 1733072895993 (+4 ms)Region opened successfully at 1733072895993 2024-12-01T17:08:15,993 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:15,994 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for 464d87381b65c6f1ab954340c4b06d00: Running coprocessor pre-open hook at 1733072895982Writing region info on filesystem at 1733072895982Initializing all the Stores at 1733072895983 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072895983Cleaning up temporary data from old regions at 1733072895989 (+6 ms)Running coprocessor post-open hooks at 1733072895993 (+4 ms)Region opened successfully at 1733072895993 2024-12-01T17:08:15,994 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818., pid=77, masterSystemTime=1733072895974 2024-12-01T17:08:15,994 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00., pid=78, masterSystemTime=1733072895975 2024-12-01T17:08:15,996 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 2024-12-01T17:08:15,996 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 2024-12-01T17:08:15,996 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=87c35a61769cdc3e9a78a17b2c2c9818, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:08:15,996 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 2024-12-01T17:08:15,996 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 
2024-12-01T17:08:15,997 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=464d87381b65c6f1ab954340c4b06d00, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:08:15,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87c35a61769cdc3e9a78a17b2c2c9818, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:08:15,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 464d87381b65c6f1ab954340c4b06d00, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:08:16,000 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=75 2024-12-01T17:08:16,000 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure 87c35a61769cdc3e9a78a17b2c2c9818, server=b87883a7882d,44753,1733072737741 in 176 msec 2024-12-01T17:08:16,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=76 2024-12-01T17:08:16,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure 464d87381b65c6f1ab954340c4b06d00, server=b87883a7882d,36521,1733072737896 in 176 msec 2024-12-01T17:08:16,001 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=87c35a61769cdc3e9a78a17b2c2c9818, ASSIGN in 333 msec 2024-12-01T17:08:16,002 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=76, resume processing ppid=74 2024-12-01T17:08:16,002 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=464d87381b65c6f1ab954340c4b06d00, ASSIGN in 334 msec 2024-12-01T17:08:16,003 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:08:16,003 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072896003"}]},"ts":"1733072896003"} 2024-12-01T17:08:16,005 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-01T17:08:16,005 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:08:16,005 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-01T17:08:16,008 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-01T17:08:16,077 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:16,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:16,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:16,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:16,120 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:16,120 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:16,120 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:16,120 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:16,120 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:16,120 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:16,120 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:16,120 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:16,121 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 494 msec 2024-12-01T17:08:16,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=74 2024-12-01T17:08:16,261 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-12-01T17:08:16,261 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-01T17:08:16,261 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:08:16,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-01T17:08:16,267 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:08:16,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 2024-12-01T17:08:16,268 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-01T17:08:16,275 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='073bd485c8dd9576467ab59e74a128385', locateType=CURRENT is [region=testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:08:16,277 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='123e71fea139afb9c4baed06a32fe38e4', locateType=CURRENT is [region=testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:16,278 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='22c58255c546a7aa3ad7af44279f31a26', locateType=CURRENT is [region=testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:16,279 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='339737d93db7626cdbeeb34d22f5371e0', locateType=CURRENT is [region=testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:16,280 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='4877002298bfdd6cdf17cf79214c4d0b1', locateType=CURRENT is [region=testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:16,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44753 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:08:16,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36521 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. with WAL disabled. Data may be lost in the event of a crash. 
2024-12-01T17:08:16,289 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-01T17:08:16,291 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-01T17:08:16,291 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 2024-12-01T17:08:16,291 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:08:16,293 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-01T17:08:16,298 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-01T17:08:16,304 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-01T17:08:16,307 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-01T17:08:16,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072896307 (current time:1733072896307). 
2024-12-01T17:08:16,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-01T17:08:16,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:08:16,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fb36bd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:16,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:16,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:16,309 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:16,309 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:16,309 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:16,309 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cbf9faf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:16,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:16,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:16,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:16,311 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57120, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:16,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a580dcd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:16,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:16,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:16,313 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:16,314 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54138, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:16,315 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:08:16,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:16,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:16,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:16,315 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:08:16,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3485a953, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:16,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:16,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:16,317 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:16,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:16,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:16,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b6d99ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:16,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:16,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:16,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:16,319 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57134, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:16,319 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@398be3bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:16,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:16,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:16,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:16,322 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54144, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-01T17:08:16,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:16,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:16,325 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34788, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:16,326 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:08:16,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:16,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:16,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:16,327 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:08:16,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-01T17:08:16,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:08:16,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-01T17:08:16,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-01T17:08:16,330 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:08:16,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-01T17:08:16,331 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:08:16,333 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:08:16,339 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-01T17:08:16,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741974_1150 (size=143) 2024-12-01T17:08:16,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741974_1150 (size=143) 2024-12-01T17:08:16,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741974_1150 (size=143) 2024-12-01T17:08:16,341 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:08:16,341 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87c35a61769cdc3e9a78a17b2c2c9818}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 464d87381b65c6f1ab954340c4b06d00}] 2024-12-01T17:08:16,342 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:16,342 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:16,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-01T17:08:16,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-01T17:08:16,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-01T17:08:16,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 2024-12-01T17:08:16,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 
2024-12-01T17:08:16,496 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing 87c35a61769cdc3e9a78a17b2c2c9818 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-01T17:08:16,496 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing 464d87381b65c6f1ab954340c4b06d00 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-01T17:08:16,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/.tmp/cf/57470b521b984e2394325a71f471aeca is 71, key is 01e1bacd72149015e94d75704353f9f0/cf:q/1733072896285/Put/seqid=0 2024-12-01T17:08:16,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/.tmp/cf/5cdd3b0f8eff4955b1e01630639172d3 is 71, key is 1c7bfcf5f0243b17ad5d3ce0bf60c135/cf:q/1733072896287/Put/seqid=0 2024-12-01T17:08:16,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741975_1151 (size=5286) 2024-12-01T17:08:16,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741975_1151 (size=5286) 2024-12-01T17:08:16,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741975_1151 (size=5286) 2024-12-01T17:08:16,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741976_1152 (size=8326) 2024-12-01T17:08:16,526 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/.tmp/cf/57470b521b984e2394325a71f471aeca 2024-12-01T17:08:16,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741976_1152 (size=8326) 2024-12-01T17:08:16,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741976_1152 (size=8326) 2024-12-01T17:08:16,527 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/.tmp/cf/5cdd3b0f8eff4955b1e01630639172d3 2024-12-01T17:08:16,532 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/.tmp/cf/57470b521b984e2394325a71f471aeca as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/cf/57470b521b984e2394325a71f471aeca 2024-12-01T17:08:16,532 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/.tmp/cf/5cdd3b0f8eff4955b1e01630639172d3 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/cf/5cdd3b0f8eff4955b1e01630639172d3 2024-12-01T17:08:16,536 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/cf/57470b521b984e2394325a71f471aeca, entries=3, sequenceid=5, filesize=5.2 K 2024-12-01T17:08:16,536 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/cf/5cdd3b0f8eff4955b1e01630639172d3, entries=47, sequenceid=5, filesize=8.1 K 2024-12-01T17:08:16,537 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 87c35a61769cdc3e9a78a17b2c2c9818 in 41ms, sequenceid=5, compaction requested=false 2024-12-01T17:08:16,537 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 464d87381b65c6f1ab954340c4b06d00 in 41ms, sequenceid=5, compaction requested=false 2024-12-01T17:08:16,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for 464d87381b65c6f1ab954340c4b06d00: 2024-12-01T17:08:16,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for 87c35a61769cdc3e9a78a17b2c2c9818: 2024-12-01T17:08:16,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. for snaptb-testExportWithResetTtl completed. 2024-12-01T17:08:16,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. for snaptb-testExportWithResetTtl completed. 
2024-12-01T17:08:16,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-01T17:08:16,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-01T17:08:16,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:16,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:16,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/cf/57470b521b984e2394325a71f471aeca] hfiles 2024-12-01T17:08:16,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/cf/5cdd3b0f8eff4955b1e01630639172d3] hfiles 2024-12-01T17:08:16,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/cf/57470b521b984e2394325a71f471aeca for snapshot=snaptb-testExportWithResetTtl 2024-12-01T17:08:16,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/cf/5cdd3b0f8eff4955b1e01630639172d3 for snapshot=snaptb-testExportWithResetTtl 2024-12-01T17:08:16,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741978_1154 (size=100) 2024-12-01T17:08:16,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741977_1153 (size=100) 2024-12-01T17:08:16,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741977_1153 (size=100) 2024-12-01T17:08:16,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741978_1154 (size=100) 2024-12-01T17:08:16,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741978_1154 (size=100) 2024-12-01T17:08:16,550 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 2024-12-01T17:08:16,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741977_1153 (size=100) 2024-12-01T17:08:16,550 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-01T17:08:16,550 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 2024-12-01T17:08:16,550 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-01T17:08:16,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-01T17:08:16,550 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:16,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-01T17:08:16,550 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:16,550 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:16,550 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:16,552 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 464d87381b65c6f1ab954340c4b06d00 in 210 msec 2024-12-01T17:08:16,553 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=79 2024-12-01T17:08:16,553 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 87c35a61769cdc3e9a78a17b2c2c9818 in 210 msec 2024-12-01T17:08:16,553 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:08:16,554 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:08:16,554 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, 
state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:08:16,554 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-01T17:08:16,555 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-01T17:08:16,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741979_1155 (size=600) 2024-12-01T17:08:16,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741979_1155 (size=600) 2024-12-01T17:08:16,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741979_1155 (size=600) 2024-12-01T17:08:16,567 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:08:16,572 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:08:16,572 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-01T17:08:16,574 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:08:16,574 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-01T17:08:16,575 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 246 msec 2024-12-01T17:08:16,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-01T17:08:16,651 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-01T17:08:16,669 INFO [Time-limited test 
{}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072896669 2024-12-01T17:08:16,669 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44451, tgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072896669, rawTgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072896669, srcFsUri=hdfs://localhost:44451, srcDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:08:16,692 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44451, inputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:08:16,692 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072896669, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072896669/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-01T17:08:16,694 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-01T17:08:16,698 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072896669/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-01T17:08:16,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741981_1157 (size=600) 2024-12-01T17:08:16,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741980_1156 (size=143) 2024-12-01T17:08:16,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741981_1157 (size=600) 2024-12-01T17:08:16,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741981_1157 (size=600) 2024-12-01T17:08:16,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741980_1156 (size=143) 2024-12-01T17:08:16,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741980_1156 (size=143) 2024-12-01T17:08:16,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741982_1158 (size=141) 2024-12-01T17:08:16,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741982_1158 (size=141) 2024-12-01T17:08:16,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741982_1158 (size=141) 
2024-12-01T17:08:16,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:16,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:16,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:17,320 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:08:17,343 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-01T17:08:17,343 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-01T17:08:17,344 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-01T17:08:17,344 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-01T17:08:17,344 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-01T17:08:17,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-7331707421263484630.jar 2024-12-01T17:08:17,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:17,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:17,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-16421371429186028576.jar 2024-12-01T17:08:17,623 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:17,623 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:17,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:17,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:17,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:17,625 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:17,625 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-01T17:08:17,626 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-01T17:08:17,626 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-01T17:08:17,626 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-01T17:08:17,626 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-01T17:08:17,627 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-01T17:08:17,627 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-01T17:08:17,627 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-01T17:08:17,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-01T17:08:17,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-01T17:08:17,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-01T17:08:17,629 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:08:17,629 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:08:17,629 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:08:17,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:08:17,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:08:17,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:08:17,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:08:17,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741983_1159 (size=24020) 2024-12-01T17:08:17,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741983_1159 (size=24020) 2024-12-01T17:08:17,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741983_1159 (size=24020) 2024-12-01T17:08:17,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741984_1160 (size=440956) 2024-12-01T17:08:17,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741984_1160 (size=440956) 2024-12-01T17:08:17,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741984_1160 (size=440956) 2024-12-01T17:08:17,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741985_1161 (size=77755) 2024-12-01T17:08:17,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741985_1161 (size=77755) 2024-12-01T17:08:17,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741985_1161 (size=77755) 2024-12-01T17:08:17,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741986_1162 (size=131360) 2024-12-01T17:08:17,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741986_1162 (size=131360) 2024-12-01T17:08:17,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741986_1162 (size=131360) 2024-12-01T17:08:17,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741987_1163 (size=111793) 2024-12-01T17:08:17,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741987_1163 (size=111793) 2024-12-01T17:08:17,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741987_1163 (size=111793) 2024-12-01T17:08:17,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741988_1164 (size=1832290) 2024-12-01T17:08:17,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741988_1164 (size=1832290) 2024-12-01T17:08:17,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741988_1164 (size=1832290) 2024-12-01T17:08:17,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741989_1165 (size=8360005) 
2024-12-01T17:08:17,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741989_1165 (size=8360005) 2024-12-01T17:08:17,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741989_1165 (size=8360005) 2024-12-01T17:08:17,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741990_1166 (size=6424742) 2024-12-01T17:08:17,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741990_1166 (size=6424742) 2024-12-01T17:08:17,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741990_1166 (size=6424742) 2024-12-01T17:08:17,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741991_1167 (size=503880) 2024-12-01T17:08:17,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741991_1167 (size=503880) 2024-12-01T17:08:17,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741991_1167 (size=503880) 2024-12-01T17:08:17,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741992_1168 (size=322274) 2024-12-01T17:08:17,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741992_1168 (size=322274) 2024-12-01T17:08:17,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741992_1168 (size=322274) 2024-12-01T17:08:17,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741993_1169 (size=20406) 2024-12-01T17:08:17,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741993_1169 (size=20406) 2024-12-01T17:08:17,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741993_1169 (size=20406) 2024-12-01T17:08:17,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741994_1170 (size=45609) 2024-12-01T17:08:17,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741994_1170 (size=45609) 2024-12-01T17:08:17,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741994_1170 (size=45609) 2024-12-01T17:08:17,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741995_1171 (size=136454) 2024-12-01T17:08:17,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741995_1171 (size=136454) 2024-12-01T17:08:17,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741995_1171 
(size=136454) 2024-12-01T17:08:17,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741996_1172 (size=1597136) 2024-12-01T17:08:17,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741996_1172 (size=1597136) 2024-12-01T17:08:17,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741996_1172 (size=1597136) 2024-12-01T17:08:17,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741997_1173 (size=30873) 2024-12-01T17:08:17,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741997_1173 (size=30873) 2024-12-01T17:08:17,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741997_1173 (size=30873) 2024-12-01T17:08:17,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741998_1174 (size=29229) 2024-12-01T17:08:17,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741998_1174 (size=29229) 2024-12-01T17:08:17,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741998_1174 (size=29229) 2024-12-01T17:08:17,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741999_1175 (size=903864) 2024-12-01T17:08:17,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741999_1175 (size=903864) 2024-12-01T17:08:17,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741999_1175 (size=903864) 2024-12-01T17:08:17,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742000_1176 (size=5175431) 2024-12-01T17:08:17,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742000_1176 (size=5175431) 2024-12-01T17:08:17,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742000_1176 (size=5175431) 2024-12-01T17:08:17,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742001_1177 (size=232881) 2024-12-01T17:08:17,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742001_1177 (size=232881) 2024-12-01T17:08:17,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742001_1177 (size=232881) 2024-12-01T17:08:17,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742002_1178 (size=1323991) 2024-12-01T17:08:17,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to 
blk_1073742002_1178 (size=1323991) 2024-12-01T17:08:17,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742002_1178 (size=1323991) 2024-12-01T17:08:17,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742003_1179 (size=4695811) 2024-12-01T17:08:17,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742003_1179 (size=4695811) 2024-12-01T17:08:17,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742003_1179 (size=4695811) 2024-12-01T17:08:17,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742004_1180 (size=1877034) 2024-12-01T17:08:17,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742004_1180 (size=1877034) 2024-12-01T17:08:17,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742004_1180 (size=1877034) 2024-12-01T17:08:17,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742005_1181 (size=217555) 2024-12-01T17:08:17,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742005_1181 (size=217555) 2024-12-01T17:08:17,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742005_1181 (size=217555) 2024-12-01T17:08:17,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742006_1182 (size=4188619) 2024-12-01T17:08:17,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742006_1182 (size=4188619) 2024-12-01T17:08:17,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742006_1182 (size=4188619) 2024-12-01T17:08:17,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742007_1183 (size=127628) 2024-12-01T17:08:17,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742007_1183 (size=127628) 2024-12-01T17:08:17,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742007_1183 (size=127628) 2024-12-01T17:08:17,966 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-01T17:08:17,969 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list
2024-12-01T17:08:17,971 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K
2024-12-01T17:08:17,971 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K
2024-12-01T17:08:17,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742008_1184 (size=427)
2024-12-01T17:08:17,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742008_1184 (size=427)
2024-12-01T17:08:17,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742008_1184 (size=427)
2024-12-01T17:08:17,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742009_1185 (size=21)
2024-12-01T17:08:17,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742009_1185 (size=21)
2024-12-01T17:08:17,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742009_1185 (size=21)
2024-12-01T17:08:18,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742010_1186 (size=304077)
2024-12-01T17:08:18,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742010_1186 (size=304077)
2024-12-01T17:08:18,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742010_1186 (size=304077)
2024-12-01T17:08:18,018 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-01T17:08:18,018 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low.
skipping enforcement to allow at least one application to start 2024-12-01T17:08:18,535 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0003_000001 (auth:SIMPLE) from 127.0.0.1:55414 2024-12-01T17:08:22,848 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:08:23,222 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0003_000001 (auth:SIMPLE) from 127.0.0.1:55304 2024-12-01T17:08:23,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742011_1187 (size=349775) 2024-12-01T17:08:23,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742011_1187 (size=349775) 2024-12-01T17:08:23,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742011_1187 (size=349775) 2024-12-01T17:08:25,489 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0003_000001 (auth:SIMPLE) from 127.0.0.1:44972 2024-12-01T17:08:25,490 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0003_000001 (auth:SIMPLE) from 127.0.0.1:48618 2024-12-01T17:08:29,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742012_1188 (size=8326) 2024-12-01T17:08:29,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742012_1188 (size=8326) 2024-12-01T17:08:29,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742012_1188 (size=8326) 2024-12-01T17:08:30,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742014_1190 (size=5286) 2024-12-01T17:08:30,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742014_1190 (size=5286) 2024-12-01T17:08:30,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742014_1190 (size=5286) 2024-12-01T17:08:31,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742013_1189 (size=22116) 2024-12-01T17:08:31,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742013_1189 (size=22116) 2024-12-01T17:08:31,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742013_1189 (size=22116) 2024-12-01T17:08:31,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742015_1191 (size=461) 2024-12-01T17:08:31,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742015_1191 (size=461) 2024-12-01T17:08:31,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44603 is added to blk_1073742015_1191 (size=461) 2024-12-01T17:08:31,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742016_1192 (size=22116) 2024-12-01T17:08:31,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742016_1192 (size=22116) 2024-12-01T17:08:31,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742016_1192 (size=22116) 2024-12-01T17:08:31,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742017_1193 (size=349775) 2024-12-01T17:08:31,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742017_1193 (size=349775) 2024-12-01T17:08:31,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742017_1193 (size=349775) 2024-12-01T17:08:31,120 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0003_000001 (auth:SIMPLE) from 127.0.0.1:48626 2024-12-01T17:08:31,128 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0003/container_1733072744857_0003_01_000003/launch_container.sh] 2024-12-01T17:08:31,128 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0003/container_1733072744857_0003_01_000003/container_tokens] 2024-12-01T17:08:31,128 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0003/container_1733072744857_0003_01_000003/sysfs] 2024-12-01T17:08:32,155 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-01T17:08:32,157 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-01T17:08:32,170 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl
2024-12-01T17:08:32,170 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot
2024-12-01T17:08:32,170 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state
2024-12-01T17:08:32,171 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb-testExportWithResetTtl
2024-12-01T17:08:32,171 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo
2024-12-01T17:08:32,171 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest
2024-12-01T17:08:32,171 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072896669/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072896669/.hbase-snapshot/snaptb-testExportWithResetTtl
2024-12-01T17:08:32,172 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072896669/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo
2024-12-01T17:08:32,172 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072896669/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest
2024-12-01T17:08:32,182 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testExportWithResetTtl
2024-12-01T17:08:32,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl
2024-12-01T17:08:32,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82
2024-12-01T17:08:32,204 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072912204"}]},"ts":"1733072912204"}
2024-12-01T17:08:32,207 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta
2024-12-01T17:08:32,207 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING
2024-12-01T17:08:32,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82,
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-01T17:08:32,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=87c35a61769cdc3e9a78a17b2c2c9818, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=464d87381b65c6f1ab954340c4b06d00, UNASSIGN}] 2024-12-01T17:08:32,211 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=464d87381b65c6f1ab954340c4b06d00, UNASSIGN 2024-12-01T17:08:32,211 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=87c35a61769cdc3e9a78a17b2c2c9818, UNASSIGN 2024-12-01T17:08:32,214 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=464d87381b65c6f1ab954340c4b06d00, regionState=CLOSING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:08:32,214 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=87c35a61769cdc3e9a78a17b2c2c9818, regionState=CLOSING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:08:32,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=464d87381b65c6f1ab954340c4b06d00, UNASSIGN because future has completed 2024-12-01T17:08:32,216 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:08:32,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 464d87381b65c6f1ab954340c4b06d00, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:08:32,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=87c35a61769cdc3e9a78a17b2c2c9818, UNASSIGN because future has completed 2024-12-01T17:08:32,217 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:08:32,218 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 87c35a61769cdc3e9a78a17b2c2c9818, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:08:32,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-01T17:08:32,369 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:32,369 DEBUG 
[RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:08:32,369 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 464d87381b65c6f1ab954340c4b06d00, disabling compactions & flushes 2024-12-01T17:08:32,370 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 2024-12-01T17:08:32,370 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 2024-12-01T17:08:32,370 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. after waiting 0 ms 2024-12-01T17:08:32,370 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 2024-12-01T17:08:32,370 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:32,370 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:08:32,370 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing 87c35a61769cdc3e9a78a17b2c2c9818, disabling compactions & flushes 2024-12-01T17:08:32,370 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 2024-12-01T17:08:32,370 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 2024-12-01T17:08:32,370 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. after waiting 0 ms 2024-12-01T17:08:32,370 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 
2024-12-01T17:08:32,375 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-01T17:08:32,375 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-01T17:08:32,376 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:08:32,376 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818. 2024-12-01T17:08:32,376 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:08:32,376 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for 87c35a61769cdc3e9a78a17b2c2c9818: Waiting for close lock at 1733072912370Running coprocessor pre-close hooks at 1733072912370Disabling compacts and flushes for region at 1733072912370Disabling writes for close at 1733072912370Writing region close event to WAL at 1733072912371 (+1 ms)Running coprocessor post-close hooks at 1733072912376 (+5 ms)Closed at 1733072912376 2024-12-01T17:08:32,376 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00. 
2024-12-01T17:08:32,376 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 464d87381b65c6f1ab954340c4b06d00: Waiting for close lock at 1733072912369Running coprocessor pre-close hooks at 1733072912369Disabling compacts and flushes for region at 1733072912369Disabling writes for close at 1733072912370 (+1 ms)Writing region close event to WAL at 1733072912371 (+1 ms)Running coprocessor post-close hooks at 1733072912376 (+5 ms)Closed at 1733072912376 2024-12-01T17:08:32,378 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed 87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:32,380 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=87c35a61769cdc3e9a78a17b2c2c9818, regionState=CLOSED 2024-12-01T17:08:32,380 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:32,381 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=464d87381b65c6f1ab954340c4b06d00, regionState=CLOSED 2024-12-01T17:08:32,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 87c35a61769cdc3e9a78a17b2c2c9818, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:08:32,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 464d87381b65c6f1ab954340c4b06d00, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:08:32,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=84 2024-12-01T17:08:32,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure 87c35a61769cdc3e9a78a17b2c2c9818, server=b87883a7882d,44753,1733072737741 in 166 msec 2024-12-01T17:08:32,386 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=85 2024-12-01T17:08:32,387 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure 464d87381b65c6f1ab954340c4b06d00, server=b87883a7882d,36521,1733072737896 in 168 msec 2024-12-01T17:08:32,388 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=87c35a61769cdc3e9a78a17b2c2c9818, UNASSIGN in 176 msec 2024-12-01T17:08:32,390 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=83 2024-12-01T17:08:32,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=464d87381b65c6f1ab954340c4b06d00, UNASSIGN in 177 msec 2024-12-01T17:08:32,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-01T17:08:32,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, 
state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 183 msec 2024-12-01T17:08:32,396 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072912396"}]},"ts":"1733072912396"} 2024-12-01T17:08:32,398 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-01T17:08:32,398 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-01T17:08:32,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 217 msec 2024-12-01T17:08:32,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-01T17:08:32,500 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-01T17:08:32,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testExportWithResetTtl 2024-12-01T17:08:32,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-01T17:08:32,503 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-01T17:08:32,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-01T17:08:32,504 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-01T17:08:32,508 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-01T17:08:32,512 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:32,512 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:32,514 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/recovered.edits] 2024-12-01T17:08:32,515 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/recovered.edits] 2024-12-01T17:08:32,520 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/cf/57470b521b984e2394325a71f471aeca to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/cf/57470b521b984e2394325a71f471aeca 2024-12-01T17:08:32,520 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/cf/5cdd3b0f8eff4955b1e01630639172d3 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/cf/5cdd3b0f8eff4955b1e01630639172d3 2024-12-01T17:08:32,523 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/recovered.edits/8.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818/recovered.edits/8.seqid 2024-12-01T17:08:32,523 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/recovered.edits/8.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00/recovered.edits/8.seqid 2024-12-01T17:08:32,523 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/87c35a61769cdc3e9a78a17b2c2c9818 2024-12-01T17:08:32,524 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportWithResetTtl/464d87381b65c6f1ab954340c4b06d00 2024-12-01T17:08:32,524 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-01T17:08:32,526 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-01T17:08:32,529 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-01T17:08:32,531 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 
2024-12-01T17:08:32,533 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-01T17:08:32,533 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 2024-12-01T17:08:32,533 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072912533"}]},"ts":"9223372036854775807"} 2024-12-01T17:08:32,533 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072912533"}]},"ts":"9223372036854775807"} 2024-12-01T17:08:32,536 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-01T17:08:32,536 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 87c35a61769cdc3e9a78a17b2c2c9818, NAME => 'testExportWithResetTtl,,1733072895624.87c35a61769cdc3e9a78a17b2c2c9818.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 464d87381b65c6f1ab954340c4b06d00, NAME => 'testExportWithResetTtl,1,1733072895624.464d87381b65c6f1ab954340c4b06d00.', STARTKEY => '1', ENDKEY => ''}] 2024-12-01T17:08:32,536 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-12-01T17:08:32,536 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733072912536"}]},"ts":"9223372036854775807"} 2024-12-01T17:08:32,539 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-01T17:08:32,539 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-01T17:08:32,541 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 38 msec 2024-12-01T17:08:32,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-01T17:08:32,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-01T17:08:32,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-01T17:08:32,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-01T17:08:32,587 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-01T17:08:32,587 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-01T17:08:32,587 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-01T17:08:32,587 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-01T17:08:32,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-01T17:08:32,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-01T17:08:32,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-01T17:08:32,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:32,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:32,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:32,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-01T17:08:32,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:32,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-01T17:08:32,598 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:32,599 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:32,599 DEBUG 
[RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-01T17:08:32,599 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-01T17:08:32,599 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:32,599 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:32,599 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportWithResetTtl 2024-12-01T17:08:32,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-01T17:08:32,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-01T17:08:32,602 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072912602"}]},"ts":"1733072912602"} 2024-12-01T17:08:32,604 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-01T17:08:32,604 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-01T17:08:32,605 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-01T17:08:32,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c8a2698e1c1b6f84eec48bde35425d3b, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a6aa36629f27a19a8436f2bd4251fc80, UNASSIGN}] 2024-12-01T17:08:32,609 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a6aa36629f27a19a8436f2bd4251fc80, UNASSIGN 2024-12-01T17:08:32,609 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c8a2698e1c1b6f84eec48bde35425d3b, UNASSIGN 2024-12-01T17:08:32,610 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=a6aa36629f27a19a8436f2bd4251fc80, regionState=CLOSING, 
regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:08:32,610 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=c8a2698e1c1b6f84eec48bde35425d3b, regionState=CLOSING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:08:32,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a6aa36629f27a19a8436f2bd4251fc80, UNASSIGN because future has completed 2024-12-01T17:08:32,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c8a2698e1c1b6f84eec48bde35425d3b, UNASSIGN because future has completed 2024-12-01T17:08:32,619 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:08:32,619 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:08:32,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:08:32,619 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure a6aa36629f27a19a8436f2bd4251fc80, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:08:32,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-01T17:08:32,774 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:32,774 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:08:32,774 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing a6aa36629f27a19a8436f2bd4251fc80, disabling compactions & flushes 2024-12-01T17:08:32,774 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:32,774 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 2024-12-01T17:08:32,774 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:08:32,774 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 
2024-12-01T17:08:32,774 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. after waiting 0 ms 2024-12-01T17:08:32,774 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing c8a2698e1c1b6f84eec48bde35425d3b, disabling compactions & flushes 2024-12-01T17:08:32,774 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 2024-12-01T17:08:32,774 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 2024-12-01T17:08:32,774 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 2024-12-01T17:08:32,774 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. after waiting 0 ms 2024-12-01T17:08:32,774 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 2024-12-01T17:08:32,793 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:08:32,794 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:08:32,794 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b. 
2024-12-01T17:08:32,794 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for c8a2698e1c1b6f84eec48bde35425d3b: Waiting for close lock at 1733072912774Running coprocessor pre-close hooks at 1733072912774Disabling compacts and flushes for region at 1733072912774Disabling writes for close at 1733072912774Writing region close event to WAL at 1733072912779 (+5 ms)Running coprocessor post-close hooks at 1733072912794 (+15 ms)Closed at 1733072912794 2024-12-01T17:08:32,796 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:32,800 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=c8a2698e1c1b6f84eec48bde35425d3b, regionState=CLOSED 2024-12-01T17:08:32,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:08:32,805 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=91 2024-12-01T17:08:32,805 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:08:32,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure c8a2698e1c1b6f84eec48bde35425d3b, server=b87883a7882d,36521,1733072737896 in 184 msec 2024-12-01T17:08:32,806 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:08:32,806 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80. 
2024-12-01T17:08:32,806 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for a6aa36629f27a19a8436f2bd4251fc80: Waiting for close lock at 1733072912774Running coprocessor pre-close hooks at 1733072912774Disabling compacts and flushes for region at 1733072912774Disabling writes for close at 1733072912774Writing region close event to WAL at 1733072912779 (+5 ms)Running coprocessor post-close hooks at 1733072912806 (+27 ms)Closed at 1733072912806 2024-12-01T17:08:32,806 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c8a2698e1c1b6f84eec48bde35425d3b, UNASSIGN in 197 msec 2024-12-01T17:08:32,807 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:32,808 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=a6aa36629f27a19a8436f2bd4251fc80, regionState=CLOSED 2024-12-01T17:08:32,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure a6aa36629f27a19a8436f2bd4251fc80, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:08:32,818 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=92 2024-12-01T17:08:32,818 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure a6aa36629f27a19a8436f2bd4251fc80, server=b87883a7882d,41375,1733072737967 in 195 msec 2024-12-01T17:08:32,829 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=90 2024-12-01T17:08:32,829 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a6aa36629f27a19a8436f2bd4251fc80, UNASSIGN in 210 msec 2024-12-01T17:08:32,835 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-01T17:08:32,835 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 226 msec 2024-12-01T17:08:32,839 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072912839"}]},"ts":"1733072912839"} 2024-12-01T17:08:32,842 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-01T17:08:32,843 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-01T17:08:32,847 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 245 msec 2024-12-01T17:08:32,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-01T17:08:32,922 INFO 
[RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-01T17:08:32,923 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportWithResetTtl 2024-12-01T17:08:32,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-01T17:08:32,934 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-01T17:08:32,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-01T17:08:32,935 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-01T17:08:32,937 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-01T17:08:32,940 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:32,940 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:32,942 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/recovered.edits] 2024-12-01T17:08:32,942 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/recovered.edits] 2024-12-01T17:08:32,949 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/cf/06862b1efad24e01aea4a300e6ea10c3 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/cf/06862b1efad24e01aea4a300e6ea10c3 2024-12-01T17:08:32,950 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/cf/a46e25421ffd43369dc339e75260127c to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/cf/a46e25421ffd43369dc339e75260127c 2024-12-01T17:08:32,956 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b/recovered.edits/9.seqid 2024-12-01T17:08:32,956 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80/recovered.edits/9.seqid 2024-12-01T17:08:32,956 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/c8a2698e1c1b6f84eec48bde35425d3b 2024-12-01T17:08:32,956 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithResetTtl/a6aa36629f27a19a8436f2bd4251fc80 2024-12-01T17:08:32,956 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-01T17:08:32,959 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-01T17:08:32,963 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-01T17:08:32,966 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-01T17:08:32,968 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-01T17:08:32,968 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 
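The entries above and immediately below trace the master-side DisableTableProcedure (pid=89) and DeleteTableProcedure (pid=95) for testtb-testExportWithResetTtl: the regions are unassigned and closed, their files archived under archive/, and their rows removed from hbase:meta. As a minimal sketch only, assuming a stock HBase 2.x client (the class name and connection setup are illustrative and not taken from this log or the test source), the client-side calls that typically drive such a sequence look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTableSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative only: the configuration/quorum in this log come from the test harness.
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Triggers a DisableTableProcedure (UNASSIGN + one CloseRegionProcedure per region).
      if (admin.isTableEnabled(tn)) {
        admin.disableTable(tn);
      }
      // Triggers a DeleteTableProcedure (archive region dirs, delete rows from hbase:meta).
      admin.deleteTable(tn);
    }
  }
}

The "Operation: DISABLE ... completed" and "Operation: DELETE ... completed" entries further down are what the RawAsyncHBaseAdmin callback reports once these master procedures finish.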
2024-12-01T17:08:32,969 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072912968"}]},"ts":"9223372036854775807"} 2024-12-01T17:08:32,969 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072912968"}]},"ts":"9223372036854775807"} 2024-12-01T17:08:32,973 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-01T17:08:32,973 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => c8a2698e1c1b6f84eec48bde35425d3b, NAME => 'testtb-testExportWithResetTtl,,1733072892885.c8a2698e1c1b6f84eec48bde35425d3b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a6aa36629f27a19a8436f2bd4251fc80, NAME => 'testtb-testExportWithResetTtl,1,1733072892885.a6aa36629f27a19a8436f2bd4251fc80.', STARTKEY => '1', ENDKEY => ''}] 2024-12-01T17:08:32,973 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-01T17:08:32,973 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733072912973"}]},"ts":"9223372036854775807"} 2024-12-01T17:08:32,976 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-01T17:08:32,978 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-01T17:08:32,979 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 55 msec 2024-12-01T17:08:33,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-01T17:08:33,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-01T17:08:33,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-01T17:08:33,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-01T17:08:33,575 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-01T17:08:33,575 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithResetTtl with data PBUF 2024-12-01T17:08:33,575 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-01T17:08:33,575 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-01T17:08:33,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-01T17:08:33,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-01T17:08:33,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-01T17:08:33,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:33,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:33,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-01T17:08:33,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:33,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:33,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-01T17:08:33,799 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-01T17:08:33,799 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-01T17:08:33,817 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-01T17:08:33,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-01T17:08:33,822 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-01T17:08:33,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-01T17:08:33,830 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-01T17:08:33,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-01T17:08:33,930 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=802 (was 795) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:53286 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35233 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 127337) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396781562) connection to localhost/127.0.0.1:38841 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-2973 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (1396781562) connection to localhost/127.0.0.1:38701 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_674983604_1 at /127.0.0.1:37910 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:45250 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396781562) connection to localhost/127.0.0.1:35233 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38701 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:43620 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=812 (was 802) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=627 (was 576) - SystemLoadAverage LEAK? -, ProcessCount=29 (was 28) - ProcessCount LEAK? 
-, AvailableMemoryMB=3466 (was 3631) 2024-12-01T17:08:33,930 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-12-01T17:08:33,958 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=802, OpenFileDescriptor=812, MaxFileDescriptor=1048576, SystemLoadAverage=627, ProcessCount=29, AvailableMemoryMB=3457 2024-12-01T17:08:33,958 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-12-01T17:08:33,962 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:08:33,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-01T17:08:33,965 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:08:33,965 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:08:33,967 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-01T17:08:33,967 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:08:33,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-01T17:08:34,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742018_1194 (size=407) 2024-12-01T17:08:34,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742018_1194 (size=407) 2024-12-01T17:08:34,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742018_1194 (size=407) 2024-12-01T17:08:34,049 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c4140f72b90ed232a7be58f97bfa7f81, NAME => 'testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:08:34,060 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => b3d164d1f6b75d91a9739eb445f344ac, NAME => 'testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:08:34,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-01T17:08:34,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742019_1195 (size=68) 2024-12-01T17:08:34,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742019_1195 (size=68) 2024-12-01T17:08:34,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742019_1195 (size=68) 2024-12-01T17:08:34,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742020_1196 (size=68) 2024-12-01T17:08:34,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742020_1196 (size=68) 2024-12-01T17:08:34,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742020_1196 (size=68) 2024-12-01T17:08:34,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-01T17:08:34,533 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:34,533 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing b3d164d1f6b75d91a9739eb445f344ac, disabling compactions & flushes 2024-12-01T17:08:34,533 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 
2024-12-01T17:08:34,533 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 2024-12-01T17:08:34,533 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. after waiting 0 ms 2024-12-01T17:08:34,533 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 2024-12-01T17:08:34,533 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 2024-12-01T17:08:34,533 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for b3d164d1f6b75d91a9739eb445f344ac: Waiting for close lock at 1733072914533Disabling compacts and flushes for region at 1733072914533Disabling writes for close at 1733072914533Writing region close event to WAL at 1733072914533Closed at 1733072914533 2024-12-01T17:08:34,534 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:34,534 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing c4140f72b90ed232a7be58f97bfa7f81, disabling compactions & flushes 2024-12-01T17:08:34,534 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 2024-12-01T17:08:34,534 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 2024-12-01T17:08:34,534 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. after waiting 0 ms 2024-12-01T17:08:34,534 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 2024-12-01T17:08:34,534 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 
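The entries above show the CreateTableProcedure (pid=96) for testtb-testExportFileSystemState writing the filesystem layout for two regions split at row key '1'. As a sketch only, assuming an already-open Admin handle (class and method names here are illustrative, not from the test source), a create call that mirrors the descriptor printed in the log would look roughly like:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateExportTableSketch {
  // Assumes an already-open Admin handle (see the earlier sketch for obtaining one).
  static void create(Admin admin) throws IOException {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
    TableDescriptorBuilder desc = TableDescriptorBuilder.newBuilder(tn)
        .setRegionReplication(1)       // TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}
        .setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)     // VERSIONS => '1'
                .build());
    // A single split key "1" yields the two regions seen above: ('', '1') and ('1', '').
    admin.createTable(desc.build(), new byte[][] { Bytes.toBytes("1") });
  }
}

The two "creating {ENCODED => ...}" entries above correspond to exactly those ('', '1') and ('1', '') regions produced by the single split key.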
2024-12-01T17:08:34,534 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for c4140f72b90ed232a7be58f97bfa7f81: Waiting for close lock at 1733072914534Disabling compacts and flushes for region at 1733072914534Disabling writes for close at 1733072914534Writing region close event to WAL at 1733072914534Closed at 1733072914534 2024-12-01T17:08:34,538 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:08:34,538 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733072914538"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072914538"}]},"ts":"1733072914538"} 2024-12-01T17:08:34,539 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733072914538"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072914538"}]},"ts":"1733072914538"} 2024-12-01T17:08:34,543 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-01T17:08:34,544 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:08:34,545 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072914544"}]},"ts":"1733072914544"} 2024-12-01T17:08:34,548 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-01T17:08:34,548 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:08:34,549 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:08:34,549 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:08:34,549 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:08:34,550 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:08:34,550 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:08:34,550 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:08:34,550 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:08:34,550 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:08:34,550 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:08:34,550 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:08:34,550 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c4140f72b90ed232a7be58f97bfa7f81, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b3d164d1f6b75d91a9739eb445f344ac, ASSIGN}] 2024-12-01T17:08:34,552 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b3d164d1f6b75d91a9739eb445f344ac, ASSIGN 2024-12-01T17:08:34,552 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c4140f72b90ed232a7be58f97bfa7f81, ASSIGN 2024-12-01T17:08:34,553 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c4140f72b90ed232a7be58f97bfa7f81, ASSIGN; state=OFFLINE, location=b87883a7882d,44753,1733072737741; forceNewPlan=false, retain=false 2024-12-01T17:08:34,553 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b3d164d1f6b75d91a9739eb445f344ac, ASSIGN; state=OFFLINE, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:08:34,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-01T17:08:34,704 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
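The entries above show the balancer picking servers for the two new regions and the master dispatching their ASSIGN/OpenRegionProcedure subprocedures. Purely for orientation, and assuming open Connection/Admin handles (illustrative names, not part of the test code), a client could confirm the resulting assignment roughly as follows:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class AssignmentCheckSketch {
  // Illustrative check that the two newly created regions are online and where they landed.
  static void verify(Connection conn, Admin admin) throws IOException {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
    if (!admin.isTableAvailable(tn)) {
      throw new IOException("table not yet available: " + tn);
    }
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Each location pairs a region (start/end key) with the server it was assigned to.
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}

In this log the same information appears in the subsequent "updating hbase:meta row=..., regionState=OPENING, regionLocation=..." entries.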
2024-12-01T17:08:34,704 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=c4140f72b90ed232a7be58f97bfa7f81, regionState=OPENING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:08:34,705 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=b3d164d1f6b75d91a9739eb445f344ac, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:08:34,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c4140f72b90ed232a7be58f97bfa7f81, ASSIGN because future has completed 2024-12-01T17:08:34,709 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure c4140f72b90ed232a7be58f97bfa7f81, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:08:34,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b3d164d1f6b75d91a9739eb445f344ac, ASSIGN because future has completed 2024-12-01T17:08:34,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure b3d164d1f6b75d91a9739eb445f344ac, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:08:34,869 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 2024-12-01T17:08:34,869 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => c4140f72b90ed232a7be58f97bfa7f81, NAME => 'testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:08:34,870 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 2024-12-01T17:08:34,870 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. service=AccessControlService 2024-12-01T17:08:34,870 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => b3d164d1f6b75d91a9739eb445f344ac, NAME => 'testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T17:08:34,870 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:08:34,870 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. service=AccessControlService 2024-12-01T17:08:34,870 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:34,870 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:08:34,870 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:34,871 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:34,871 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:34,871 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:34,871 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:34,871 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:34,871 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:34,885 INFO [StoreOpener-c4140f72b90ed232a7be58f97bfa7f81-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:34,888 INFO [StoreOpener-c4140f72b90ed232a7be58f97bfa7f81-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c4140f72b90ed232a7be58f97bfa7f81 columnFamilyName cf 2024-12-01T17:08:34,888 DEBUG [StoreOpener-c4140f72b90ed232a7be58f97bfa7f81-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:08:34,890 INFO [StoreOpener-c4140f72b90ed232a7be58f97bfa7f81-1 {}] regionserver.HStore(327): Store=c4140f72b90ed232a7be58f97bfa7f81/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:08:34,890 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:34,891 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:34,892 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:34,892 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:34,892 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:34,897 INFO [StoreOpener-b3d164d1f6b75d91a9739eb445f344ac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:34,899 INFO [StoreOpener-b3d164d1f6b75d91a9739eb445f344ac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b3d164d1f6b75d91a9739eb445f344ac columnFamilyName cf 2024-12-01T17:08:34,900 DEBUG [StoreOpener-b3d164d1f6b75d91a9739eb445f344ac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:08:34,900 INFO 
[StoreOpener-b3d164d1f6b75d91a9739eb445f344ac-1 {}] regionserver.HStore(327): Store=b3d164d1f6b75d91a9739eb445f344ac/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:08:34,900 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:34,901 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:34,902 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:34,902 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:34,907 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:34,907 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:34,909 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:34,921 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:08:34,922 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened c4140f72b90ed232a7be58f97bfa7f81; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61759052, jitterRate=-0.07971841096878052}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:08:34,922 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:34,923 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for c4140f72b90ed232a7be58f97bfa7f81: Running coprocessor pre-open hook at 1733072914871Writing region info on filesystem at 1733072914871Initializing all the Stores at 1733072914872 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072914872Cleaning up temporary data from old regions at 1733072914892 (+20 ms)Running coprocessor post-open hooks at 1733072914922 (+30 ms)Region opened successfully at 1733072914923 (+1 ms) 2024-12-01T17:08:34,924 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81., pid=99, masterSystemTime=1733072914863 2024-12-01T17:08:34,927 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 2024-12-01T17:08:34,928 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 2024-12-01T17:08:34,929 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=c4140f72b90ed232a7be58f97bfa7f81, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:08:34,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure c4140f72b90ed232a7be58f97bfa7f81, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:08:34,936 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:08:34,937 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened b3d164d1f6b75d91a9739eb445f344ac; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72734653, jitterRate=0.08383078873157501}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:08:34,937 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:34,937 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for b3d164d1f6b75d91a9739eb445f344ac: Running coprocessor pre-open hook at 1733072914871Writing region info on filesystem at 1733072914871Initializing all the Stores at 1733072914872 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072914872Cleaning up temporary data from old regions at 1733072914907 (+35 ms)Running coprocessor post-open hooks at 1733072914937 (+30 ms)Region opened successfully at 1733072914937 
2024-12-01T17:08:34,939 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac., pid=100, masterSystemTime=1733072914864 2024-12-01T17:08:34,941 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_1/usercache/jenkins/appcache/application_1733072744857_0003/container_1733072744857_0003_01_000002/launch_container.sh] 2024-12-01T17:08:34,941 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_1/usercache/jenkins/appcache/application_1733072744857_0003/container_1733072744857_0003_01_000002/container_tokens] 2024-12-01T17:08:34,942 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_1/usercache/jenkins/appcache/application_1733072744857_0003/container_1733072744857_0003_01_000002/sysfs] 2024-12-01T17:08:34,948 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 2024-12-01T17:08:34,948 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 
2024-12-01T17:08:34,949 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=b3d164d1f6b75d91a9739eb445f344ac, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:08:34,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=97 2024-12-01T17:08:34,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure c4140f72b90ed232a7be58f97bfa7f81, server=b87883a7882d,44753,1733072737741 in 228 msec 2024-12-01T17:08:34,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure b3d164d1f6b75d91a9739eb445f344ac, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:08:34,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c4140f72b90ed232a7be58f97bfa7f81, ASSIGN in 402 msec 2024-12-01T17:08:34,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=98 2024-12-01T17:08:34,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure b3d164d1f6b75d91a9739eb445f344ac, server=b87883a7882d,36521,1733072737896 in 246 msec 2024-12-01T17:08:34,969 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=98, resume processing ppid=96 2024-12-01T17:08:34,969 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b3d164d1f6b75d91a9739eb445f344ac, ASSIGN in 410 msec 2024-12-01T17:08:34,971 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:08:34,971 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072914971"}]},"ts":"1733072914971"} 2024-12-01T17:08:34,974 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-01T17:08:34,975 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:08:34,976 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-01T17:08:34,989 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-01T17:08:35,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:35,018 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:35,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:35,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:35,030 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:35,030 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:35,032 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:35,032 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:35,034 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 1.0690 sec 2024-12-01T17:08:35,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-01T17:08:35,111 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-01T17:08:35,111 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-01T17:08:35,111 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:08:35,117 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-12-01T17:08:35,118 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:08:35,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 
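The entries above trace CreateTableProcedure pid=96 through region ASSIGN, the ACL write, and ZooKeeper permission propagation. For orientation only, a minimal client-side sketch of the call that would drive such a procedure is shown below; it is not taken from this log, and the split key and column family name are assumptions read off the STARTKEY/ENDKEY and 'cf' values logged during region open.

// Illustrative sketch only (assumed, not from this log): creating the pre-split test table.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
      // One family 'cf' and one split point '1' give two regions ('', '1') and ('1', ''),
      // matching the STARTKEY/ENDKEY pairs logged while the regions were opened.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(
          TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build(),
          splitKeys);
    }
  }
}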
2024-12-01T17:08:35,118 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-01T17:08:35,122 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-01T17:08:35,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072915123 (current time:1733072915123). 2024-12-01T17:08:35,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:08:35,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-01T17:08:35,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:08:35,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@285fcf89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:35,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:35,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:35,138 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:35,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:35,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:35,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ec7a892, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:35,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:35,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:35,140 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:35,141 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.3:40190, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:35,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1f47ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:35,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:35,144 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:35,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:35,147 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33064, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:35,149 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:08:35,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:35,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:35,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:35,149 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:08:35,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1920576e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:35,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:35,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:35,160 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:35,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:35,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:35,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29482134, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:35,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:35,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:35,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:35,164 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40212, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:35,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@302e200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:35,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:35,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:35,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:35,169 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33078, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-01T17:08:35,171 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:35,171 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:35,173 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37190, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:35,174 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:08:35,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:35,174 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-01T17:08:35,175 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:08:35,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:08:35,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-01T17:08:35,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-01T17:08:35,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-01T17:08:35,183 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:08:35,185 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:08:35,188 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:08:35,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742021_1197 (size=170) 2024-12-01T17:08:35,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742021_1197 (size=170) 2024-12-01T17:08:35,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-01T17:08:35,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742021_1197 (size=170) 2024-12-01T17:08:35,293 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:08:35,293 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c4140f72b90ed232a7be58f97bfa7f81}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b3d164d1f6b75d91a9739eb445f344ac}] 2024-12-01T17:08:35,294 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:35,295 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:35,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-01T17:08:35,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 2024-12-01T17:08:35,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for b3d164d1f6b75d91a9739eb445f344ac: 2024-12-01T17:08:35,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. for emptySnaptb0-testExportFileSystemState completed. 2024-12-01T17:08:35,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-01T17:08:35,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:35,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:08:35,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-01T17:08:35,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 
2024-12-01T17:08:35,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for c4140f72b90ed232a7be58f97bfa7f81: 2024-12-01T17:08:35,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. for emptySnaptb0-testExportFileSystemState completed. 2024-12-01T17:08:35,451 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-01T17:08:35,451 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:35,451 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:08:35,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-01T17:08:35,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742022_1198 (size=71) 2024-12-01T17:08:35,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742022_1198 (size=71) 2024-12-01T17:08:35,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742022_1198 (size=71) 2024-12-01T17:08:35,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 
2024-12-01T17:08:35,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-01T17:08:35,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-01T17:08:35,638 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:35,638 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:35,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b3d164d1f6b75d91a9739eb445f344ac in 347 msec 2024-12-01T17:08:35,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742023_1199 (size=71) 2024-12-01T17:08:35,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742023_1199 (size=71) 2024-12-01T17:08:35,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742023_1199 (size=71) 2024-12-01T17:08:35,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 
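The pid=101 SnapshotProcedure and its pid=102/103 SnapshotRegionProcedure children above are the server side of an empty FLUSH snapshot. As a hedged illustration (not from this log), the client-side trigger is a single Admin.snapshot call; only the snapshot and table names are taken from the log, the surrounding setup is assumed.

// Illustrative sketch only: requesting a FLUSH-type snapshot through the Admin API,
// matching the "{ ss=emptySnaptb0-testExportFileSystemState ... type=FLUSH ttl=0 }" description above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class EmptySnapshotSketch {
  static void takeEmptySnapshot(Admin admin) throws Exception {
    admin.snapshot("emptySnaptb0-testExportFileSystemState",
        TableName.valueOf("testtb-testExportFileSystemState"),
        SnapshotType.FLUSH);
  }
}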
2024-12-01T17:08:35,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-01T17:08:35,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-01T17:08:35,683 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:35,683 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:35,687 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=102, resume processing ppid=101 2024-12-01T17:08:35,688 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:08:35,688 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c4140f72b90ed232a7be58f97bfa7f81 in 391 msec 2024-12-01T17:08:35,689 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:08:35,691 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:08:35,691 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-01T17:08:35,692 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-01T17:08:35,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742024_1200 (size=552) 2024-12-01T17:08:35,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742024_1200 (size=552) 2024-12-01T17:08:35,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742024_1200 (size=552) 2024-12-01T17:08:35,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-01T17:08:35,852 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:08:35,858 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T17:08:35,882 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:08:35,883 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-01T17:08:35,885 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:08:35,885 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-01T17:08:35,888 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 708 msec 2024-12-01T17:08:36,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-01T17:08:36,321 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-01T17:08:36,337 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='09b7efd22fb7bc5842ed3d322e5b2a5ca', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:08:36,338 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='184189dbc1376692e31f2ef60a31766d8', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:36,339 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='251eae5d1b931c8385c96d6ee8602130c', locateType=CURRENT is 
[region=testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:36,343 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='3c39980b09cec5302c5e29605c305fdfb', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:36,344 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='44101f6b5eec840aa32ab56df9ab42d6c', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:36,347 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='5b82f1bb3650da24358e61e213074fb73', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:36,348 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='6a922a2f60df3801400259730428f36d2', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:36,366 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44753 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:08:36,369 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36521 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:08:36,380 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-01T17:08:36,385 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-01T17:08:36,385 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 
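The HRegion(8528) warnings above indicate writes issued with the WAL disabled. Purely as an illustrative sketch (not from this log), such a write sets SKIP_WAL durability on the mutation; the row key, qualifier, and value below are placeholders.

// Illustrative sketch only: a Put with Durability.SKIP_WAL is what produces the
// "writing data to region ... with WAL disabled" warning above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class SkipWalWriteSketch {
  static void writeWithoutWal(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("example-row"));                              // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));   // placeholder cell
      put.setDurability(Durability.SKIP_WAL);                                       // bypass the write-ahead log
      table.put(put);
    }
  }
}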
2024-12-01T17:08:36,385 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:08:36,388 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-01T17:08:36,397 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-01T17:08:36,408 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-01T17:08:36,429 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-01T17:08:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072916429 (current time:1733072916429). 2024-12-01T17:08:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:08:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-01T17:08:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:08:36,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57d086f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:36,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:36,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:36,462 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:36,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:36,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:36,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23beb3eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-01T17:08:36,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:36,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:36,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:36,465 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40230, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:36,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5937c6ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:36,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:36,468 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:36,468 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:36,470 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33090, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:36,472 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 
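The ClientMetaTableAccessor(255) entries near the top of this section show the client scanning hbase:meta for the regions of testtb-testExportFileSystemState before the snapshot is requested. A minimal sketch of the public-API equivalent, assuming a reachable cluster and default client configuration (the test itself goes through the internal async-client path shown in the log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class ListTableRegions {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Public-API counterpart of the hbase:meta scans logged by ClientMetaTableAccessor(255):
          // enumerate the regions of the table about to be snapshotted.
          for (RegionInfo region : admin.getRegions(TableName.valueOf("testtb-testExportFileSystemState"))) {
            System.out.println(region.getRegionNameAsString());
          }
        }
      }
    }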
2024-12-01T17:08:36,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:36,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:36,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:36,473 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:08:36,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38114704, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:36,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:36,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:36,483 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:36,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:36,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:36,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@698dfd90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:36,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:36,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:36,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:36,487 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40248, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:36,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73052ea6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:36,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:36,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:36,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:36,495 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33098, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:36,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:36,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:36,507 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37192, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:36,510 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 
2024-12-01T17:08:36,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:36,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:36,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:36,510 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:08:36,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-01T17:08:36,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
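The MasterRpcServices(1763) entry above is the server side of a FLUSH snapshot request ({ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }); SnapshotDescriptionUtils then fills in the creation time, TTL, version and owner before the SnapshotProcedure is registered. A minimal sketch of issuing that request from a client, assuming a standard connection; the snapshot and table names are the ones in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the request logged above.
          admin.snapshot(new SnapshotDescription(
              "snaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"),
              SnapshotType.FLUSH));
        }
      }
    }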
2024-12-01T17:08:36,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-01T17:08:36,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-01T17:08:36,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-01T17:08:36,515 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:08:36,517 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:08:36,523 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:08:36,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-01T17:08:36,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742025_1201 (size=165) 2024-12-01T17:08:36,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742025_1201 (size=165) 2024-12-01T17:08:36,675 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:08:36,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c4140f72b90ed232a7be58f97bfa7f81}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b3d164d1f6b75d91a9739eb445f344ac}] 2024-12-01T17:08:36,677 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:36,678 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:36,678 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742025_1201 (size=165) 2024-12-01T17:08:36,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-01T17:08:36,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-01T17:08:36,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 2024-12-01T17:08:36,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-01T17:08:36,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 2024-12-01T17:08:36,833 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing c4140f72b90ed232a7be58f97bfa7f81 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-01T17:08:36,833 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing b3d164d1f6b75d91a9739eb445f344ac 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-01T17:08:36,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/.tmp/cf/c581d785be1942e6b799e8e97e3fee59 is 71, key is 177620159c764b20c8ba5370dcee7790/cf:q/1733072916369/Put/seqid=0 2024-12-01T17:08:36,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/.tmp/cf/fdb0e8dddeb04a4082e97e0c1633aa23 is 71, key is 0174d8ab28f099aa848e611205f5fa91/cf:q/1733072916366/Put/seqid=0 2024-12-01T17:08:36,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742026_1202 (size=8188) 2024-12-01T17:08:36,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742026_1202 (size=8188) 2024-12-01T17:08:36,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742026_1202 (size=8188) 2024-12-01T17:08:36,880 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/.tmp/cf/c581d785be1942e6b799e8e97e3fee59 2024-12-01T17:08:36,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/.tmp/cf/c581d785be1942e6b799e8e97e3fee59 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/cf/c581d785be1942e6b799e8e97e3fee59 2024-12-01T17:08:36,912 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/cf/c581d785be1942e6b799e8e97e3fee59, entries=45, sequenceid=6, filesize=8.0 K 2024-12-01T17:08:36,913 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for b3d164d1f6b75d91a9739eb445f344ac in 80ms, sequenceid=6, compaction requested=false 2024-12-01T17:08:36,913 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-01T17:08:36,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for b3d164d1f6b75d91a9739eb445f344ac: 2024-12-01T17:08:36,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. for snaptb0-testExportFileSystemState completed. 2024-12-01T17:08:36,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-01T17:08:36,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:36,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/cf/c581d785be1942e6b799e8e97e3fee59] hfiles 2024-12-01T17:08:36,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/cf/c581d785be1942e6b799e8e97e3fee59 for snapshot=snaptb0-testExportFileSystemState 2024-12-01T17:08:36,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742027_1203 (size=5424) 2024-12-01T17:08:36,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742027_1203 (size=5424) 2024-12-01T17:08:36,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742027_1203 (size=5424) 2024-12-01T17:08:36,926 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/.tmp/cf/fdb0e8dddeb04a4082e97e0c1633aa23 2024-12-01T17:08:36,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/.tmp/cf/fdb0e8dddeb04a4082e97e0c1633aa23 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/cf/fdb0e8dddeb04a4082e97e0c1633aa23 2024-12-01T17:08:36,943 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/cf/fdb0e8dddeb04a4082e97e0c1633aa23, entries=5, sequenceid=6, filesize=5.3 K 2024-12-01T17:08:36,944 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for c4140f72b90ed232a7be58f97bfa7f81 in 111ms, sequenceid=6, compaction requested=false 2024-12-01T17:08:36,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for 
c4140f72b90ed232a7be58f97bfa7f81: 2024-12-01T17:08:36,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. for snaptb0-testExportFileSystemState completed. 2024-12-01T17:08:36,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-01T17:08:36,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:36,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/cf/fdb0e8dddeb04a4082e97e0c1633aa23] hfiles 2024-12-01T17:08:36,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/cf/fdb0e8dddeb04a4082e97e0c1633aa23 for snapshot=snaptb0-testExportFileSystemState 2024-12-01T17:08:36,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742028_1204 (size=110) 2024-12-01T17:08:36,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742028_1204 (size=110) 2024-12-01T17:08:36,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742028_1204 (size=110) 2024-12-01T17:08:36,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 
2024-12-01T17:08:36,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-01T17:08:36,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-01T17:08:36,964 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:36,964 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:36,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b3d164d1f6b75d91a9739eb445f344ac in 290 msec 2024-12-01T17:08:37,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742029_1205 (size=110) 2024-12-01T17:08:37,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742029_1205 (size=110) 2024-12-01T17:08:37,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742029_1205 (size=110) 2024-12-01T17:08:37,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 
2024-12-01T17:08:37,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-01T17:08:37,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-01T17:08:37,017 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:37,017 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:37,021 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=105, resume processing ppid=104 2024-12-01T17:08:37,021 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c4140f72b90ed232a7be58f97bfa7f81 in 343 msec 2024-12-01T17:08:37,021 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:08:37,022 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:08:37,023 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:08:37,023 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-01T17:08:37,024 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-01T17:08:37,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742030_1206 (size=630) 2024-12-01T17:08:37,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742030_1206 (size=630) 2024-12-01T17:08:37,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742030_1206 (size=630) 2024-12-01T17:08:37,127 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:08:37,140 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-01T17:08:37,166 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:08:37,167 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-01T17:08:37,169 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:08:37,169 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-01T17:08:37,172 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 657 msec 2024-12-01T17:08:37,268 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0003_000001 (auth:SIMPLE) from 127.0.0.1:56662 2024-12-01T17:08:37,294 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0003/container_1733072744857_0003_01_000001/launch_container.sh] 2024-12-01T17:08:37,294 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0003/container_1733072744857_0003_01_000001/container_tokens] 2024-12-01T17:08:37,294 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0003/container_1733072744857_0003_01_000001/sysfs] 2024-12-01T17:08:37,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-01T17:08:37,330 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-01T17:08:37,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-01T17:08:37,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-01T17:08:37,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-01T17:08:37,651 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-01T17:08:37,651 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072917651 2024-12-01T17:08:37,651 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44451, tgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072917651, rawTgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072917651, srcFsUri=hdfs://localhost:44451, srcDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:08:37,699 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44451, inputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:08:37,699 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072917651, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072917651/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-01T17:08:37,702 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
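The ExportSnapshot(1094), (1095) and (1104) entries above show the export tool resolving the source and target filesystems and verifying the snapshot before it copies the manifest. A hedged sketch of driving the same tool through ToolRunner, on the assumption that it can be invoked this way just as its command-line entry point does (hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot ...); the snapshot name and target directory are taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunSnapshotExport {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent to the command-line form with -snapshot and -copy-to options.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to",
            "hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072917651"
        });
        System.exit(rc);
      }
    }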
2024-12-01T17:08:37,734 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072917651/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-01T17:08:37,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742031_1207 (size=165) 2024-12-01T17:08:37,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742031_1207 (size=165) 2024-12-01T17:08:37,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742031_1207 (size=165) 2024-12-01T17:08:37,905 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:08:37,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742032_1208 (size=630) 2024-12-01T17:08:37,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742032_1208 (size=630) 2024-12-01T17:08:37,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742032_1208 (size=630) 2024-12-01T17:08:37,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:37,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:37,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:39,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-5332145620450496893.jar 2024-12-01T17:08:39,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:39,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:39,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-7618124225644915385.jar 2024-12-01T17:08:39,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:39,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:39,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:39,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:39,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:39,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:08:39,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-01T17:08:39,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-01T17:08:39,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-01T17:08:39,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-01T17:08:39,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-01T17:08:39,186 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-01T17:08:39,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-01T17:08:39,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-01T17:08:39,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-01T17:08:39,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-01T17:08:39,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-01T17:08:39,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:08:39,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:08:39,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:08:39,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:08:39,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:08:39,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:08:39,189 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:08:39,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742033_1209 (size=24020) 2024-12-01T17:08:39,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742033_1209 (size=24020) 2024-12-01T17:08:39,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742033_1209 (size=24020) 2024-12-01T17:08:39,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742034_1210 (size=77755) 2024-12-01T17:08:39,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742034_1210 (size=77755) 2024-12-01T17:08:39,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742034_1210 (size=77755) 2024-12-01T17:08:39,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742035_1211 (size=131360) 2024-12-01T17:08:39,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742035_1211 (size=131360) 2024-12-01T17:08:39,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742035_1211 (size=131360) 2024-12-01T17:08:39,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742036_1212 (size=111793) 2024-12-01T17:08:39,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742036_1212 (size=111793) 2024-12-01T17:08:39,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742036_1212 (size=111793) 2024-12-01T17:08:39,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742037_1213 (size=1832290) 2024-12-01T17:08:39,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742037_1213 (size=1832290) 2024-12-01T17:08:39,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742037_1213 (size=1832290) 2024-12-01T17:08:39,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742038_1214 (size=8360005) 2024-12-01T17:08:39,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742038_1214 (size=8360005) 
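The long run of TableMapReduceUtil(972) entries above records, for each dependency class, which jar will be shipped with the export MapReduce job. A minimal sketch of the call that triggers that resolution, assuming a Job already created for the export; the resolved jar set is whatever the classpath provides, exactly as logged:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class AddExportJobDependencies {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "snapshot-export");
        // Ships the HBase/Hadoop client jars with the job; each resolved jar is logged
        // as "For class <name>, using jar <path>", as seen above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }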
2024-12-01T17:08:39,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742038_1214 (size=8360005) 2024-12-01T17:08:39,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742039_1215 (size=503880) 2024-12-01T17:08:39,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742039_1215 (size=503880) 2024-12-01T17:08:39,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742039_1215 (size=503880) 2024-12-01T17:08:39,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742040_1216 (size=440956) 2024-12-01T17:08:39,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742040_1216 (size=440956) 2024-12-01T17:08:39,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742040_1216 (size=440956) 2024-12-01T17:08:39,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742041_1217 (size=322274) 2024-12-01T17:08:39,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742041_1217 (size=322274) 2024-12-01T17:08:39,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742041_1217 (size=322274) 2024-12-01T17:08:39,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742042_1218 (size=20406) 2024-12-01T17:08:39,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742042_1218 (size=20406) 2024-12-01T17:08:39,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742042_1218 (size=20406) 2024-12-01T17:08:39,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742043_1219 (size=45609) 2024-12-01T17:08:39,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742043_1219 (size=45609) 2024-12-01T17:08:39,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742043_1219 (size=45609) 2024-12-01T17:08:39,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742044_1220 (size=136454) 2024-12-01T17:08:39,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742044_1220 (size=136454) 2024-12-01T17:08:39,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742044_1220 (size=136454) 2024-12-01T17:08:39,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742045_1221 
(size=6424742) 2024-12-01T17:08:39,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742045_1221 (size=6424742) 2024-12-01T17:08:39,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742045_1221 (size=6424742) 2024-12-01T17:08:39,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742046_1222 (size=1597136) 2024-12-01T17:08:39,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742046_1222 (size=1597136) 2024-12-01T17:08:39,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742046_1222 (size=1597136) 2024-12-01T17:08:39,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742047_1223 (size=30873) 2024-12-01T17:08:39,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742047_1223 (size=30873) 2024-12-01T17:08:39,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742047_1223 (size=30873) 2024-12-01T17:08:39,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742048_1224 (size=29229) 2024-12-01T17:08:39,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742048_1224 (size=29229) 2024-12-01T17:08:39,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742048_1224 (size=29229) 2024-12-01T17:08:39,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742049_1225 (size=903864) 2024-12-01T17:08:39,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742049_1225 (size=903864) 2024-12-01T17:08:39,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742049_1225 (size=903864) 2024-12-01T17:08:39,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742050_1226 (size=5175431) 2024-12-01T17:08:39,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742050_1226 (size=5175431) 2024-12-01T17:08:39,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742050_1226 (size=5175431) 2024-12-01T17:08:39,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742051_1227 (size=232881) 2024-12-01T17:08:39,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742051_1227 (size=232881) 2024-12-01T17:08:39,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to 
blk_1073742051_1227 (size=232881) 2024-12-01T17:08:39,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742052_1228 (size=1323991) 2024-12-01T17:08:39,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742052_1228 (size=1323991) 2024-12-01T17:08:39,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742052_1228 (size=1323991) 2024-12-01T17:08:39,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742053_1229 (size=4695811) 2024-12-01T17:08:39,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742053_1229 (size=4695811) 2024-12-01T17:08:39,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742053_1229 (size=4695811) 2024-12-01T17:08:39,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742054_1230 (size=1877034) 2024-12-01T17:08:39,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742054_1230 (size=1877034) 2024-12-01T17:08:39,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742054_1230 (size=1877034) 2024-12-01T17:08:39,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742055_1231 (size=217555) 2024-12-01T17:08:39,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742055_1231 (size=217555) 2024-12-01T17:08:39,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742055_1231 (size=217555) 2024-12-01T17:08:39,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742056_1232 (size=4188619) 2024-12-01T17:08:39,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742056_1232 (size=4188619) 2024-12-01T17:08:39,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742056_1232 (size=4188619) 2024-12-01T17:08:39,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742057_1233 (size=127628) 2024-12-01T17:08:39,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742057_1233 (size=127628) 2024-12-01T17:08:39,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742057_1233 (size=127628) 2024-12-01T17:08:39,514 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-01T17:08:39,516 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-01T17:08:39,519 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.0 K 2024-12-01T17:08:39,519 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.3 K 2024-12-01T17:08:39,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742058_1234 (size=447) 2024-12-01T17:08:39,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742058_1234 (size=447) 2024-12-01T17:08:39,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742058_1234 (size=447) 2024-12-01T17:08:39,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742059_1235 (size=21) 2024-12-01T17:08:39,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742059_1235 (size=21) 2024-12-01T17:08:39,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742059_1235 (size=21) 2024-12-01T17:08:39,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742060_1236 (size=304087) 2024-12-01T17:08:39,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742060_1236 (size=304087) 2024-12-01T17:08:39,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742060_1236 (size=304087) 2024-12-01T17:08:39,561 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-01T17:08:39,561 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-01T17:08:40,128 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0004_000001 (auth:SIMPLE) from 127.0.0.1:57588 2024-12-01T17:08:41,091 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b3d164d1f6b75d91a9739eb445f344ac changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:08:41,091 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region c4140f72b90ed232a7be58f97bfa7f81 changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:08:42,834 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:08:44,679 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0004_000001 (auth:SIMPLE) from 127.0.0.1:53364 2024-12-01T17:08:44,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742061_1237 (size=349785) 2024-12-01T17:08:44,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742061_1237 (size=349785) 2024-12-01T17:08:44,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742061_1237 (size=349785) 2024-12-01T17:08:46,891 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0004_000001 (auth:SIMPLE) from 127.0.0.1:57622 2024-12-01T17:08:46,891 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0004_000001 (auth:SIMPLE) from 127.0.0.1:46652 2024-12-01T17:08:51,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742062_1238 (size=8188) 2024-12-01T17:08:51,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742062_1238 (size=8188) 2024-12-01T17:08:51,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742062_1238 (size=8188) 2024-12-01T17:08:51,470 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_0/usercache/jenkins/appcache/application_1733072744857_0004/container_1733072744857_0004_01_000002/launch_container.sh] 2024-12-01T17:08:51,471 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_0/usercache/jenkins/appcache/application_1733072744857_0004/container_1733072744857_0004_01_000002/container_tokens] 2024-12-01T17:08:51,471 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_0/usercache/jenkins/appcache/application_1733072744857_0004/container_1733072744857_0004_01_000002/sysfs] 2024-12-01T17:08:52,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742064_1240 (size=5424) 2024-12-01T17:08:52,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742064_1240 (size=5424) 2024-12-01T17:08:52,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742064_1240 (size=5424) 2024-12-01T17:08:52,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742063_1239 (size=22162) 2024-12-01T17:08:52,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742063_1239 (size=22162) 2024-12-01T17:08:52,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742063_1239 (size=22162) 2024-12-01T17:08:52,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742065_1241 (size=466) 2024-12-01T17:08:52,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742065_1241 (size=466) 2024-12-01T17:08:52,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742065_1241 (size=466) 2024-12-01T17:08:52,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742066_1242 (size=22162) 2024-12-01T17:08:52,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742066_1242 (size=22162) 2024-12-01T17:08:52,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742066_1242 (size=22162) 2024-12-01T17:08:52,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742067_1243 (size=349785) 2024-12-01T17:08:52,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742067_1243 (size=349785) 2024-12-01T17:08:52,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742067_1243 (size=349785) 2024-12-01T17:08:52,761 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0004_000001 (auth:SIMPLE) from 127.0.0.1:36142 2024-12-01T17:08:52,777 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_1/usercache/jenkins/appcache/application_1733072744857_0004/container_1733072744857_0004_01_000003/launch_container.sh] 
2024-12-01T17:08:52,777 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_1/usercache/jenkins/appcache/application_1733072744857_0004/container_1733072744857_0004_01_000003/container_tokens] 2024-12-01T17:08:52,777 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_1/usercache/jenkins/appcache/application_1733072744857_0004/container_1733072744857_0004_01_000003/sysfs] 2024-12-01T17:08:54,701 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-01T17:08:54,703 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-01T17:08:54,709 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-01T17:08:54,709 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-01T17:08:54,709 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-01T17:08:54,709 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-01T17:08:54,710 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-01T17:08:54,710 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-01T17:08:54,710 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072917651/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072917651/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-01T17:08:54,710 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072917651/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-01T17:08:54,710 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072917651/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-01T17:08:54,717 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] 
master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemState 2024-12-01T17:08:54,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-01T17:08:54,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-01T17:08:54,720 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072934720"}]},"ts":"1733072934720"} 2024-12-01T17:08:54,722 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-01T17:08:54,722 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-01T17:08:54,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-01T17:08:54,724 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c4140f72b90ed232a7be58f97bfa7f81, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b3d164d1f6b75d91a9739eb445f344ac, UNASSIGN}] 2024-12-01T17:08:54,725 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b3d164d1f6b75d91a9739eb445f344ac, UNASSIGN 2024-12-01T17:08:54,725 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c4140f72b90ed232a7be58f97bfa7f81, UNASSIGN 2024-12-01T17:08:54,725 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=c4140f72b90ed232a7be58f97bfa7f81, regionState=CLOSING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:08:54,725 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=b3d164d1f6b75d91a9739eb445f344ac, regionState=CLOSING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:08:54,727 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c4140f72b90ed232a7be58f97bfa7f81, UNASSIGN because future has completed 2024-12-01T17:08:54,727 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:08:54,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=109, state=RUNNABLE, hasLock=false; 
CloseRegionProcedure c4140f72b90ed232a7be58f97bfa7f81, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:08:54,727 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b3d164d1f6b75d91a9739eb445f344ac, UNASSIGN because future has completed 2024-12-01T17:08:54,727 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:08:54,728 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure b3d164d1f6b75d91a9739eb445f344ac, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:08:54,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-01T17:08:54,879 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:54,879 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:08:54,880 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing c4140f72b90ed232a7be58f97bfa7f81, disabling compactions & flushes 2024-12-01T17:08:54,880 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 2024-12-01T17:08:54,880 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 2024-12-01T17:08:54,880 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. after waiting 0 ms 2024-12-01T17:08:54,880 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 
2024-12-01T17:08:54,880 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:54,880 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:08:54,880 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing b3d164d1f6b75d91a9739eb445f344ac, disabling compactions & flushes 2024-12-01T17:08:54,880 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 2024-12-01T17:08:54,880 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 2024-12-01T17:08:54,880 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. after waiting 0 ms 2024-12-01T17:08:54,880 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 2024-12-01T17:08:54,885 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:08:54,886 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:08:54,886 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:08:54,886 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac. 2024-12-01T17:08:54,886 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:08:54,886 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81. 
2024-12-01T17:08:54,886 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for b3d164d1f6b75d91a9739eb445f344ac: Waiting for close lock at 1733072934880Running coprocessor pre-close hooks at 1733072934880Disabling compacts and flushes for region at 1733072934880Disabling writes for close at 1733072934880Writing region close event to WAL at 1733072934881 (+1 ms)Running coprocessor post-close hooks at 1733072934886 (+5 ms)Closed at 1733072934886 2024-12-01T17:08:54,886 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for c4140f72b90ed232a7be58f97bfa7f81: Waiting for close lock at 1733072934879Running coprocessor pre-close hooks at 1733072934879Disabling compacts and flushes for region at 1733072934879Disabling writes for close at 1733072934880 (+1 ms)Writing region close event to WAL at 1733072934881 (+1 ms)Running coprocessor post-close hooks at 1733072934886 (+5 ms)Closed at 1733072934886 2024-12-01T17:08:54,888 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:54,889 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=b3d164d1f6b75d91a9739eb445f344ac, regionState=CLOSED 2024-12-01T17:08:54,889 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:54,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure b3d164d1f6b75d91a9739eb445f344ac, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:08:54,897 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=c4140f72b90ed232a7be58f97bfa7f81, regionState=CLOSED 2024-12-01T17:08:54,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure c4140f72b90ed232a7be58f97bfa7f81, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:08:54,900 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=110 2024-12-01T17:08:54,900 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure b3d164d1f6b75d91a9739eb445f344ac, server=b87883a7882d,36521,1733072737896 in 170 msec 2024-12-01T17:08:54,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b3d164d1f6b75d91a9739eb445f344ac, UNASSIGN in 176 msec 2024-12-01T17:08:54,901 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=109 2024-12-01T17:08:54,901 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure c4140f72b90ed232a7be58f97bfa7f81, server=b87883a7882d,44753,1733072737741 in 172 msec 2024-12-01T17:08:54,902 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=108 2024-12-01T17:08:54,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c4140f72b90ed232a7be58f97bfa7f81, UNASSIGN in 177 msec 2024-12-01T17:08:54,904 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-01T17:08:54,904 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 180 msec 2024-12-01T17:08:54,905 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072934905"}]},"ts":"1733072934905"} 2024-12-01T17:08:54,908 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-01T17:08:54,908 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-01T17:08:54,910 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 192 msec 2024-12-01T17:08:55,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-01T17:08:55,040 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-01T17:08:55,040 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemState 2024-12-01T17:08:55,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-01T17:08:55,042 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-01T17:08:55,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-01T17:08:55,043 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-01T17:08:55,046 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-01T17:08:55,047 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:55,047 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:55,049 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/recovered.edits] 2024-12-01T17:08:55,049 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/recovered.edits] 2024-12-01T17:08:55,054 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/cf/fdb0e8dddeb04a4082e97e0c1633aa23 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/cf/fdb0e8dddeb04a4082e97e0c1633aa23 2024-12-01T17:08:55,054 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/cf/c581d785be1942e6b799e8e97e3fee59 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/cf/c581d785be1942e6b799e8e97e3fee59 2024-12-01T17:08:55,056 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81/recovered.edits/9.seqid 2024-12-01T17:08:55,056 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac/recovered.edits/9.seqid 2024-12-01T17:08:55,057 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/c4140f72b90ed232a7be58f97bfa7f81 2024-12-01T17:08:55,057 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemState/b3d164d1f6b75d91a9739eb445f344ac 2024-12-01T17:08:55,057 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-01T17:08:55,059 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-01T17:08:55,062 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-01T17:08:55,067 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-01T17:08:55,069 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-01T17:08:55,069 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-12-01T17:08:55,069 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072935069"}]},"ts":"9223372036854775807"} 2024-12-01T17:08:55,069 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072935069"}]},"ts":"9223372036854775807"} 2024-12-01T17:08:55,072 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-01T17:08:55,072 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => c4140f72b90ed232a7be58f97bfa7f81, NAME => 'testtb-testExportFileSystemState,,1733072913961.c4140f72b90ed232a7be58f97bfa7f81.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => b3d164d1f6b75d91a9739eb445f344ac, NAME => 'testtb-testExportFileSystemState,1,1733072913961.b3d164d1f6b75d91a9739eb445f344ac.', STARTKEY => '1', ENDKEY => ''}] 2024-12-01T17:08:55,072 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 
2024-12-01T17:08:55,072 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733072935072"}]},"ts":"9223372036854775807"} 2024-12-01T17:08:55,074 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-01T17:08:55,075 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-01T17:08:55,076 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 35 msec 2024-12-01T17:08:55,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-01T17:08:55,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-01T17:08:55,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-01T17:08:55,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-01T17:08:55,128 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-01T17:08:55,129 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-01T17:08:55,129 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-01T17:08:55,129 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-01T17:08:55,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-01T17:08:55,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-01T17:08:55,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:55,138 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:55,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-01T17:08:55,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-01T17:08:55,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:55,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:55,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-01T17:08:55,140 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-01T17:08:55,140 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-01T17:08:55,149 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-01T17:08:55,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-01T17:08:55,153 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-01T17:08:55,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-01T17:08:55,181 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=810 (was 802) Potentially hanging thread: Thread-3709 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) 
java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 130826) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:50302 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1008666186_1 at /127.0.0.1:34226 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38877 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:34240 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:45542 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1008666186_1 at /127.0.0.1:50286 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396781562) connection to localhost/127.0.0.1:42017 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42017 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=813 (was 812) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=723 (was 627) - SystemLoadAverage LEAK? -, ProcessCount=23 (was 29), AvailableMemoryMB=3489 (was 3457) - AvailableMemoryMB LEAK? - 2024-12-01T17:08:55,182 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-12-01T17:08:55,201 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=810, OpenFileDescriptor=813, MaxFileDescriptor=1048576, SystemLoadAverage=723, ProcessCount=22, AvailableMemoryMB=3487 2024-12-01T17:08:55,201 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-12-01T17:08:55,202 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:08:55,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-01T17:08:55,204 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:08:55,205 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-01T17:08:55,205 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:08:55,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-01T17:08:55,206 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:08:55,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742068_1244 (size=404) 2024-12-01T17:08:55,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742068_1244 (size=404) 2024-12-01T17:08:55,214 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742068_1244 (size=404) 2024-12-01T17:08:55,217 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => eb34f7e309a981aebf4709e257d001d6, NAME => 'testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:08:55,217 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5429f20942a88aa4e36d8f0081a686f8, NAME => 'testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:08:55,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742069_1245 (size=65) 2024-12-01T17:08:55,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742069_1245 (size=65) 2024-12-01T17:08:55,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742069_1245 (size=65) 2024-12-01T17:08:55,230 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:55,230 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 5429f20942a88aa4e36d8f0081a686f8, disabling compactions & flushes 2024-12-01T17:08:55,230 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:08:55,230 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:08:55,230 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 
after waiting 0 ms 2024-12-01T17:08:55,230 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:08:55,230 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:08:55,230 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5429f20942a88aa4e36d8f0081a686f8: Waiting for close lock at 1733072935230Disabling compacts and flushes for region at 1733072935230Disabling writes for close at 1733072935230Writing region close event to WAL at 1733072935230Closed at 1733072935230 2024-12-01T17:08:55,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742070_1246 (size=65) 2024-12-01T17:08:55,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742070_1246 (size=65) 2024-12-01T17:08:55,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742070_1246 (size=65) 2024-12-01T17:08:55,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-01T17:08:55,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-01T17:08:55,644 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:55,644 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing eb34f7e309a981aebf4709e257d001d6, disabling compactions & flushes 2024-12-01T17:08:55,644 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 2024-12-01T17:08:55,644 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 2024-12-01T17:08:55,644 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. after waiting 0 ms 2024-12-01T17:08:55,644 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 2024-12-01T17:08:55,644 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 
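The "Potentially hanging thread" stack dumps and the ResourceChecker counters earlier in this section (Thread=810 against a limit of 500, open file descriptors, system load) are produced by walking every live JVM thread between test methods. A minimal sketch of that kind of check, using only the standard java.lang API rather than HBase's actual ResourceChecker, might look like the following; the 500 threshold is simply the limit quoted in the log:

    import java.util.Map;

    // Sketch only: dump live threads the way a leak checker might, and flag a
    // leak when the live-thread count exceeds a threshold (500 in the log above).
    public class ThreadLeakSketch {
        private static final int THREAD_LIMIT = 500;

        public static void main(String[] args) {
            Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
            if (stacks.size() > THREAD_LIMIT) {
                System.out.printf("Thread=%d is superior to %d%n", stacks.size(), THREAD_LIMIT);
            }
            for (Map.Entry<Thread, StackTraceElement[]> e : stacks.entrySet()) {
                System.out.println("Potentially hanging thread: " + e.getKey().getName());
                for (StackTraceElement frame : e.getValue()) {
                    System.out.println("    " + frame);
                }
            }
        }
    }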
2024-12-01T17:08:55,644 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for eb34f7e309a981aebf4709e257d001d6: Waiting for close lock at 1733072935644Disabling compacts and flushes for region at 1733072935644Disabling writes for close at 1733072935644Writing region close event to WAL at 1733072935644Closed at 1733072935644 2024-12-01T17:08:55,647 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:08:55,648 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733072935648"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072935648"}]},"ts":"1733072935648"} 2024-12-01T17:08:55,648 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733072935648"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072935648"}]},"ts":"1733072935648"} 2024-12-01T17:08:55,653 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-01T17:08:55,654 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:08:55,655 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072935654"}]},"ts":"1733072935654"} 2024-12-01T17:08:55,657 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-01T17:08:55,658 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:08:55,659 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:08:55,660 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:08:55,660 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:08:55,660 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:08:55,660 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:08:55,660 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:08:55,660 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:08:55,660 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:08:55,660 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:08:55,660 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:08:55,660 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5429f20942a88aa4e36d8f0081a686f8, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=eb34f7e309a981aebf4709e257d001d6, ASSIGN}] 2024-12-01T17:08:55,662 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5429f20942a88aa4e36d8f0081a686f8, ASSIGN 2024-12-01T17:08:55,664 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5429f20942a88aa4e36d8f0081a686f8, ASSIGN; state=OFFLINE, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:08:55,666 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=eb34f7e309a981aebf4709e257d001d6, ASSIGN 2024-12-01T17:08:55,667 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=eb34f7e309a981aebf4709e257d001d6, ASSIGN; state=OFFLINE, location=b87883a7882d,44753,1733072737741; forceNewPlan=false, retain=false 2024-12-01T17:08:55,815 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
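The entries above trace CreateTableProcedure pid=114 for 'testtb-testConsecutiveExports' from the client request (17:08:55,202) through writing the filesystem layout, adding both regions to hbase:meta, and scheduling their ASSIGN procedures. A roughly equivalent client-side request against the public Admin API is sketched below; the split key '1' matches the two regions in the log (STARTKEY ''..'1' and '1'..''), while the connection setup and class names are illustrative, not the test's actual code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: create the logged table with one 'cf' family (VERSIONS => '1')
    // and a single split point at '1', which yields the two regions seen above.
    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("testtb-testConsecutiveExports");
                TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(table)
                    .setRegionReplication(1)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                        .setMaxVersions(1)
                        .build());
                admin.createTable(builder.build(), new byte[][] { Bytes.toBytes("1") });
            }
        }
    }

The master answers the creation RPC asynchronously; the repeated "Checking to see if procedure is done pid=114" lines are the client polling for completion.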
2024-12-01T17:08:55,815 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=eb34f7e309a981aebf4709e257d001d6, regionState=OPENING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:08:55,816 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=5429f20942a88aa4e36d8f0081a686f8, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:08:55,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=eb34f7e309a981aebf4709e257d001d6, ASSIGN because future has completed 2024-12-01T17:08:55,820 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure eb34f7e309a981aebf4709e257d001d6, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:08:55,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5429f20942a88aa4e36d8f0081a686f8, ASSIGN because future has completed 2024-12-01T17:08:55,830 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5429f20942a88aa4e36d8f0081a686f8, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:08:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-01T17:08:55,985 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 2024-12-01T17:08:55,986 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => eb34f7e309a981aebf4709e257d001d6, NAME => 'testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T17:08:55,987 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. service=AccessControlService 2024-12-01T17:08:55,987 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:08:55,987 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:55,987 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:55,987 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:55,988 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:55,993 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:08:55,993 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => 5429f20942a88aa4e36d8f0081a686f8, NAME => 'testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:08:55,993 INFO [StoreOpener-eb34f7e309a981aebf4709e257d001d6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:55,994 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. service=AccessControlService 2024-12-01T17:08:55,994 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
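Both region opens above register the AccessControlService coprocessor (org.apache.hadoop.hbase.security.access.AccessController), which is what later lets the per-table ACL be read and attached to the snapshot. In a secure cluster that coprocessor is normally enabled through site configuration rather than per table; a hedged sketch with the standard property names follows, though the test's own setup is not visible in this excerpt:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: configuration keys commonly used to turn on HBase authorization
    // and load the AccessController coprocessor on master and region servers.
    public class SecureClusterConfigSketch {
        public static Configuration secureConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setBoolean("hbase.security.authorization", true);
            conf.set("hbase.coprocessor.master.classes",
                "org.apache.hadoop.hbase.security.access.AccessController");
            conf.set("hbase.coprocessor.region.classes",
                "org.apache.hadoop.hbase.security.access.AccessController");
            conf.set("hbase.coprocessor.regionserver.classes",
                "org.apache.hadoop.hbase.security.access.AccessController");
            return conf;
        }
    }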
2024-12-01T17:08:55,994 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:55,994 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:08:55,994 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:55,994 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:55,996 INFO [StoreOpener-eb34f7e309a981aebf4709e257d001d6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eb34f7e309a981aebf4709e257d001d6 columnFamilyName cf 2024-12-01T17:08:55,996 INFO [StoreOpener-5429f20942a88aa4e36d8f0081a686f8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:55,996 DEBUG [StoreOpener-eb34f7e309a981aebf4709e257d001d6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:08:55,997 INFO [StoreOpener-eb34f7e309a981aebf4709e257d001d6-1 {}] regionserver.HStore(327): Store=eb34f7e309a981aebf4709e257d001d6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:08:55,997 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:55,999 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:55,999 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:56,000 INFO [StoreOpener-5429f20942a88aa4e36d8f0081a686f8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5429f20942a88aa4e36d8f0081a686f8 columnFamilyName cf 2024-12-01T17:08:56,000 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:56,000 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:56,000 DEBUG [StoreOpener-5429f20942a88aa4e36d8f0081a686f8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:08:56,001 INFO [StoreOpener-5429f20942a88aa4e36d8f0081a686f8-1 {}] regionserver.HStore(327): Store=5429f20942a88aa4e36d8f0081a686f8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:08:56,001 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:56,001 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:56,002 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:56,002 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:56,002 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:56,002 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:56,004 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for 
5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:56,009 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:08:56,009 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:08:56,010 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened 5429f20942a88aa4e36d8f0081a686f8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59049995, jitterRate=-0.12008650600910187}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:08:56,010 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:56,011 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for 5429f20942a88aa4e36d8f0081a686f8: Running coprocessor pre-open hook at 1733072935994Writing region info on filesystem at 1733072935994Initializing all the Stores at 1733072935996 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072935996Cleaning up temporary data from old regions at 1733072936002 (+6 ms)Running coprocessor post-open hooks at 1733072936010 (+8 ms)Region opened successfully at 1733072936011 (+1 ms) 2024-12-01T17:08:56,012 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8., pid=118, masterSystemTime=1733072935985 2024-12-01T17:08:56,012 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened eb34f7e309a981aebf4709e257d001d6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74125703, jitterRate=0.1045590490102768}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:08:56,012 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:56,012 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for eb34f7e309a981aebf4709e257d001d6: Running coprocessor pre-open hook at 1733072935988Writing region info on filesystem at 
1733072935988Initializing all the Stores at 1733072935991 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072935991Cleaning up temporary data from old regions at 1733072936000 (+9 ms)Running coprocessor post-open hooks at 1733072936012 (+12 ms)Region opened successfully at 1733072936012 2024-12-01T17:08:56,014 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6., pid=117, masterSystemTime=1733072935982 2024-12-01T17:08:56,015 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:08:56,015 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:08:56,016 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=5429f20942a88aa4e36d8f0081a686f8, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:08:56,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5429f20942a88aa4e36d8f0081a686f8, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:08:56,019 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 2024-12-01T17:08:56,019 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 
2024-12-01T17:08:56,019 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=eb34f7e309a981aebf4709e257d001d6, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:08:56,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure eb34f7e309a981aebf4709e257d001d6, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:08:56,027 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=115 2024-12-01T17:08:56,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 5429f20942a88aa4e36d8f0081a686f8, server=b87883a7882d,36521,1733072737896 in 191 msec 2024-12-01T17:08:56,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=116 2024-12-01T17:08:56,030 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5429f20942a88aa4e36d8f0081a686f8, ASSIGN in 368 msec 2024-12-01T17:08:56,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure eb34f7e309a981aebf4709e257d001d6, server=b87883a7882d,44753,1733072737741 in 207 msec 2024-12-01T17:08:56,035 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=114 2024-12-01T17:08:56,035 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=eb34f7e309a981aebf4709e257d001d6, ASSIGN in 370 msec 2024-12-01T17:08:56,038 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:08:56,038 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072936038"}]},"ts":"1733072936038"} 2024-12-01T17:08:56,041 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-01T17:08:56,043 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:08:56,043 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-01T17:08:56,048 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-01T17:08:56,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:56,117 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:56,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:56,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:08:56,128 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:56,128 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:56,129 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:56,129 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-01T17:08:56,132 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 927 msec 2024-12-01T17:08:56,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-01T17:08:56,341 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-01T17:08:56,341 INFO [Async-Client-Retry-Timer-pool-0 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-01T17:08:56,342 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:08:56,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-01T17:08:56,347 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:08:56,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 
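HBaseTestingUtil above waits until every region of testtb-testConsecutiveExports is assigned before the first snapshot is requested. The same condition can be checked from an ordinary client through the public API, as in this hedged sketch (connection handling and names are illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Sketch: confirm every region of the table has a live location, i.e. has
    // been assigned to a region server, and print the assignments.
    public class AssignmentCheckSketch {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("testtb-testConsecutiveExports");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin();
                 RegionLocator locator = conn.getRegionLocator(table)) {
                System.out.println("table available: " + admin.isTableAvailable(table));
                for (HRegionLocation loc : locator.getAllRegionLocations()) {
                    System.out.println(loc.getRegion().getRegionNameAsString()
                        + " -> " + loc.getServerName());
                }
            }
        }
    }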
2024-12-01T17:08:56,347 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-01T17:08:56,351 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-01T17:08:56,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072936351 (current time:1733072936351). 2024-12-01T17:08:56,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:08:56,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-01T17:08:56,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:08:56,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2445c41c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:56,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:56,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:56,354 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:56,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:56,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:56,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78b8e5f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:56,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:56,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:56,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:56,357 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.3:55856, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:56,357 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1594c6b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:56,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:56,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:56,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:56,361 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41070, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:56,363 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:08:56,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:56,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:56,363 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
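In the call stack above, the master validates the snapshot request by checking that security is available (SnapshotDescriptionUtils.isSecurityAvailable); in the entries that follow it reads the table's ACL, written as "jenkins: RWXCA" when the table was created, and attaches it to the snapshot description. A client can read the same per-table permissions with AccessControlClient; a hedged sketch, with connection handling illustrative:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    // Sketch: list the ACL entries stored for the table; the log shows a single
    // entry, "jenkins: RWXCA", for testtb-testConsecutiveExports.
    public class AclReadSketch {
        public static void main(String[] args) throws Throwable {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
                List<UserPermission> perms =
                    AccessControlClient.getUserPermissions(conn, "testtb-testConsecutiveExports");
                perms.forEach(System.out::println);
            }
        }
    }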
2024-12-01T17:08:56,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:56,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ea874a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:56,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:56,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:56,374 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:56,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:56,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:56,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ae2b639, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:56,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:56,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:56,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:56,376 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55870, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:56,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27954446, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:56,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:56,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:56,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:56,383 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41072, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:56,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:56,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:56,388 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34676, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:56,390 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:08:56,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:56,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:56,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:56,391 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:08:56,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-01T17:08:56,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:08:56,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-01T17:08:56,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-01T17:08:56,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-01T17:08:56,397 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:08:56,401 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:08:56,405 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:08:56,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-01T17:08:56,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-01T17:08:57,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-01T17:08:57,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742071_1247 (size=161) 2024-12-01T17:08:57,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742071_1247 (size=161) 2024-12-01T17:08:57,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742071_1247 (size=161) 2024-12-01T17:08:57,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-01T17:08:57,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-01T17:08:57,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-01T17:08:57,517 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:08:57,517 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5429f20942a88aa4e36d8f0081a686f8}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eb34f7e309a981aebf4709e257d001d6}] 2024-12-01T17:08:57,527 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:57,527 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:57,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-01T17:08:57,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-01T17:08:57,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-01T17:08:57,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 2024-12-01T17:08:57,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for eb34f7e309a981aebf4709e257d001d6: 2024-12-01T17:08:57,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 
2024-12-01T17:08:57,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. for emptySnaptb0-testConsecutiveExports completed. 2024-12-01T17:08:57,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 5429f20942a88aa4e36d8f0081a686f8: 2024-12-01T17:08:57,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. for emptySnaptb0-testConsecutiveExports completed. 2024-12-01T17:08:57,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-01T17:08:57,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:57,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-01T17:08:57,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:08:57,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:57,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:08:57,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742072_1248 (size=68) 2024-12-01T17:08:57,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742072_1248 (size=68) 2024-12-01T17:08:57,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742072_1248 (size=68) 2024-12-01T17:08:57,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 
2024-12-01T17:08:57,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-01T17:08:57,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-01T17:08:57,849 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:57,849 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:57,853 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure eb34f7e309a981aebf4709e257d001d6 in 334 msec 2024-12-01T17:08:57,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742073_1249 (size=68) 2024-12-01T17:08:57,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742073_1249 (size=68) 2024-12-01T17:08:57,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:08:57,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742073_1249 (size=68) 2024-12-01T17:08:57,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-01T17:08:57,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-01T17:08:57,898 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:57,898 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:57,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=120, resume processing ppid=119 2024-12-01T17:08:57,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5429f20942a88aa4e36d8f0081a686f8 in 382 msec 2024-12-01T17:08:57,902 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:08:57,903 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ 
ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:08:57,904 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:08:57,905 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-01T17:08:57,906 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-01T17:08:58,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742074_1250 (size=543) 2024-12-01T17:08:58,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742074_1250 (size=543) 2024-12-01T17:08:58,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742074_1250 (size=543) 2024-12-01T17:08:58,071 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:08:58,110 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:08:58,111 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-01T17:08:58,113 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:08:58,113 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-01T17:08:58,117 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 1.7200 sec 2024-12-01T17:08:58,344 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating 
new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-01T17:08:58,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-01T17:08:58,541 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-01T17:08:58,551 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='004c8c4139c9ab73ebb250c54775014f8', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:58,551 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='1c732a5c27a47b3da14f72fcca34dd5d6', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:08:58,559 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='26f4c54e93c1084dc65d10fc3df3a362b', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:08:58,560 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='3eec0a158603c869636a3280e840e8c20', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:08:58,562 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='5a0ea30d4571175bcdee4208fd14a3970', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:08:58,563 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='68ffd529e2744ceaad82a7994af882687', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:08:58,564 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='4267c79c56bc22b71a1e4ee986a852cd0', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:08:58,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44753 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:08:58,577 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36521 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. with WAL disabled. 
Data may be lost in the event of a crash. 2024-12-01T17:08:58,579 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-01T17:08:58,588 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-01T17:08:58,588 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:08:58,589 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:08:58,592 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-01T17:08:58,602 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-01T17:08:58,612 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-01T17:08:58,616 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-01T17:08:58,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072938616 (current time:1733072938616). 
2024-12-01T17:08:58,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:08:58,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-01T17:08:58,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:08:58,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2029c082, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:58,630 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:58,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:58,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:58,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@492bd7ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:58,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:58,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:58,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:58,634 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55880, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:58,635 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@150e143d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:58,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:58,636 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:58,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:58,638 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41074, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:58,640 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:08:58,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:58,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:58,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:58,642 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:08:58,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b1e2267, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:08:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:08:58,659 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:08:58,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:08:58,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:08:58,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fc48584, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:58,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:08:58,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:08:58,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:58,663 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55890, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:08:58,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d094a6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:08:58,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:08:58,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:08:58,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:58,670 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41084, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-01T17:08:58,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:08:58,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:08:58,675 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34682, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:08:58,676 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:08:58,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:08:58,677 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:08:58,677 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:08:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-01T17:08:58,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:08:58,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-01T17:08:58,682 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:08:58,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-01T17:08:58,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-01T17:08:58,684 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:08:58,689 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:08:58,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742075_1251 (size=156) 2024-12-01T17:08:58,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742075_1251 (size=156) 2024-12-01T17:08:58,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742075_1251 (size=156) 2024-12-01T17:08:58,784 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:08:58,784 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5429f20942a88aa4e36d8f0081a686f8}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eb34f7e309a981aebf4709e257d001d6}] 2024-12-01T17:08:58,786 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:58,787 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:58,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-01T17:08:58,895 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0004_000001 (auth:SIMPLE) from 127.0.0.1:54242 2024-12-01T17:08:58,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-01T17:08:58,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 2024-12-01T17:08:58,940 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing eb34f7e309a981aebf4709e257d001d6 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-01T17:08:58,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-01T17:08:58,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 
2024-12-01T17:08:58,940 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 5429f20942a88aa4e36d8f0081a686f8 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-01T17:08:58,976 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/.tmp/cf/8f3fd29c0a474cdea05f62f2f7f6821f is 71, key is 12597eb7f17cb6f84458889fc21cdddb/cf:q/1733072938574/Put/seqid=0 2024-12-01T17:08:58,982 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/.tmp/cf/6476ed675d2742999963d96b4f192969 is 71, key is 00f9e7b4ea1f558aa8ca255f0cad2719/cf:q/1733072938577/Put/seqid=0 2024-12-01T17:08:59,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-01T17:08:59,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742076_1252 (size=8256) 2024-12-01T17:08:59,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742076_1252 (size=8256) 2024-12-01T17:08:59,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742076_1252 (size=8256) 2024-12-01T17:08:59,084 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/.tmp/cf/8f3fd29c0a474cdea05f62f2f7f6821f 2024-12-01T17:08:59,094 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/.tmp/cf/8f3fd29c0a474cdea05f62f2f7f6821f as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/cf/8f3fd29c0a474cdea05f62f2f7f6821f 2024-12-01T17:08:59,103 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/cf/8f3fd29c0a474cdea05f62f2f7f6821f, entries=46, sequenceid=6, filesize=8.1 K 2024-12-01T17:08:59,104 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 
eb34f7e309a981aebf4709e257d001d6 in 164ms, sequenceid=6, compaction requested=false 2024-12-01T17:08:59,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for eb34f7e309a981aebf4709e257d001d6: 2024-12-01T17:08:59,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. for snaptb0-testConsecutiveExports completed. 2024-12-01T17:08:59,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-01T17:08:59,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:59,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/cf/8f3fd29c0a474cdea05f62f2f7f6821f] hfiles 2024-12-01T17:08:59,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/cf/8f3fd29c0a474cdea05f62f2f7f6821f for snapshot=snaptb0-testConsecutiveExports 2024-12-01T17:08:59,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742077_1253 (size=5356) 2024-12-01T17:08:59,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742077_1253 (size=5356) 2024-12-01T17:08:59,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742077_1253 (size=5356) 2024-12-01T17:08:59,141 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/.tmp/cf/6476ed675d2742999963d96b4f192969 2024-12-01T17:08:59,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/.tmp/cf/6476ed675d2742999963d96b4f192969 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/cf/6476ed675d2742999963d96b4f192969 2024-12-01T17:08:59,161 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/cf/6476ed675d2742999963d96b4f192969, entries=4, sequenceid=6, filesize=5.2 K 2024-12-01T17:08:59,163 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 5429f20942a88aa4e36d8f0081a686f8 in 223ms, sequenceid=6, compaction requested=false 2024-12-01T17:08:59,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 5429f20942a88aa4e36d8f0081a686f8: 2024-12-01T17:08:59,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. for snaptb0-testConsecutiveExports completed. 2024-12-01T17:08:59,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-01T17:08:59,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:08:59,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/cf/6476ed675d2742999963d96b4f192969] hfiles 2024-12-01T17:08:59,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/cf/6476ed675d2742999963d96b4f192969 for snapshot=snaptb0-testConsecutiveExports 2024-12-01T17:08:59,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742078_1254 (size=107) 2024-12-01T17:08:59,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742078_1254 (size=107) 2024-12-01T17:08:59,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742078_1254 (size=107) 2024-12-01T17:08:59,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 
2024-12-01T17:08:59,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-12-01T17:08:59,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-12-01T17:08:59,218 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:59,218 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:08:59,225 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure eb34f7e309a981aebf4709e257d001d6 in 438 msec 2024-12-01T17:08:59,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742079_1255 (size=107) 2024-12-01T17:08:59,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742079_1255 (size=107) 2024-12-01T17:08:59,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742079_1255 (size=107) 2024-12-01T17:08:59,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-01T17:08:59,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 
2024-12-01T17:08:59,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-01T17:08:59,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-01T17:08:59,313 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:59,314 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:08:59,323 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=123, resume processing ppid=122 2024-12-01T17:08:59,323 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5429f20942a88aa4e36d8f0081a686f8 in 531 msec 2024-12-01T17:08:59,323 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:08:59,330 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:08:59,338 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:08:59,338 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-01T17:08:59,339 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-01T17:08:59,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742080_1256 (size=621) 2024-12-01T17:08:59,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742080_1256 (size=621) 2024-12-01T17:08:59,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742080_1256 (size=621) 2024-12-01T17:08:59,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-01T17:08:59,889 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ 
ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:08:59,928 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:08:59,929 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-01T17:08:59,931 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:08:59,932 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-01T17:08:59,934 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 1.2540 sec 2024-12-01T17:09:00,460 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:09:00,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-01T17:09:00,830 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-01T17:09:00,831 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830 2024-12-01T17:09:00,831 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830, srcFsUri=hdfs://localhost:44451, srcDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:00,861 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44451, inputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:00,862 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@767e74a0, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-01T17:09:00,863 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-01T17:09:00,867 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-01T17:09:00,890 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:00,891 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:00,891 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:01,776 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-14990985995608377324.jar 2024-12-01T17:09:01,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:01,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:01,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-7543766646609103754.jar 2024-12-01T17:09:01,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:01,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:01,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:01,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:01,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:01,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:01,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-01T17:09:01,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-01T17:09:01,832 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-01T17:09:01,832 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-01T17:09:01,832 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-01T17:09:01,832 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-01T17:09:01,832 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-01T17:09:01,833 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-01T17:09:01,833 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-01T17:09:01,833 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-01T17:09:01,833 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-01T17:09:01,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:09:01,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:09:01,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:09:01,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:09:01,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:09:01,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:09:01,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:09:01,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742081_1257 (size=24020) 2024-12-01T17:09:01,882 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742081_1257 (size=24020) 2024-12-01T17:09:01,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742081_1257 (size=24020) 2024-12-01T17:09:01,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742082_1258 (size=77755) 2024-12-01T17:09:01,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742082_1258 (size=77755) 2024-12-01T17:09:01,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742082_1258 (size=77755) 2024-12-01T17:09:01,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742083_1259 (size=131360) 2024-12-01T17:09:01,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742083_1259 (size=131360) 2024-12-01T17:09:01,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742083_1259 (size=131360) 2024-12-01T17:09:01,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742084_1260 (size=111793) 2024-12-01T17:09:01,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742084_1260 (size=111793) 2024-12-01T17:09:01,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742084_1260 (size=111793) 2024-12-01T17:09:01,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742085_1261 (size=1832290) 2024-12-01T17:09:01,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742085_1261 (size=1832290) 2024-12-01T17:09:01,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742085_1261 (size=1832290) 2024-12-01T17:09:01,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742086_1262 (size=8360005) 2024-12-01T17:09:01,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742086_1262 (size=8360005) 2024-12-01T17:09:01,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742086_1262 (size=8360005) 2024-12-01T17:09:01,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742087_1263 (size=503880) 2024-12-01T17:09:01,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742087_1263 (size=503880) 2024-12-01T17:09:01,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742087_1263 (size=503880) 2024-12-01T17:09:01,952 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742088_1264 (size=322274) 2024-12-01T17:09:01,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742088_1264 (size=322274) 2024-12-01T17:09:01,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742088_1264 (size=322274) 2024-12-01T17:09:01,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742089_1265 (size=20406) 2024-12-01T17:09:01,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742089_1265 (size=20406) 2024-12-01T17:09:01,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742089_1265 (size=20406) 2024-12-01T17:09:01,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742090_1266 (size=45609) 2024-12-01T17:09:01,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742090_1266 (size=45609) 2024-12-01T17:09:01,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742090_1266 (size=45609) 2024-12-01T17:09:01,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742091_1267 (size=136454) 2024-12-01T17:09:01,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742091_1267 (size=136454) 2024-12-01T17:09:01,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742091_1267 (size=136454) 2024-12-01T17:09:01,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742092_1268 (size=1597136) 2024-12-01T17:09:01,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742092_1268 (size=1597136) 2024-12-01T17:09:01,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742092_1268 (size=1597136) 2024-12-01T17:09:01,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742093_1269 (size=30873) 2024-12-01T17:09:01,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742093_1269 (size=30873) 2024-12-01T17:09:01,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742093_1269 (size=30873) 2024-12-01T17:09:01,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742094_1270 (size=29229) 2024-12-01T17:09:01,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742094_1270 (size=29229) 2024-12-01T17:09:01,994 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742094_1270 (size=29229) 2024-12-01T17:09:02,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742095_1271 (size=903864) 2024-12-01T17:09:02,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742095_1271 (size=903864) 2024-12-01T17:09:02,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742095_1271 (size=903864) 2024-12-01T17:09:02,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742096_1272 (size=5175431) 2024-12-01T17:09:02,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742096_1272 (size=5175431) 2024-12-01T17:09:02,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742096_1272 (size=5175431) 2024-12-01T17:09:02,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742097_1273 (size=232881) 2024-12-01T17:09:02,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742097_1273 (size=232881) 2024-12-01T17:09:02,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742097_1273 (size=232881) 2024-12-01T17:09:02,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742098_1274 (size=1323991) 2024-12-01T17:09:02,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742098_1274 (size=1323991) 2024-12-01T17:09:02,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742098_1274 (size=1323991) 2024-12-01T17:09:02,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742099_1275 (size=4695811) 2024-12-01T17:09:02,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742099_1275 (size=4695811) 2024-12-01T17:09:02,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742099_1275 (size=4695811) 2024-12-01T17:09:02,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742100_1276 (size=440956) 2024-12-01T17:09:02,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742100_1276 (size=440956) 2024-12-01T17:09:02,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742100_1276 (size=440956) 2024-12-01T17:09:02,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742101_1277 (size=1877034) 2024-12-01T17:09:02,069 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742101_1277 (size=1877034) 2024-12-01T17:09:02,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742101_1277 (size=1877034) 2024-12-01T17:09:02,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742102_1278 (size=217555) 2024-12-01T17:09:02,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742102_1278 (size=217555) 2024-12-01T17:09:02,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742102_1278 (size=217555) 2024-12-01T17:09:02,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742103_1279 (size=4188619) 2024-12-01T17:09:02,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742103_1279 (size=4188619) 2024-12-01T17:09:02,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742103_1279 (size=4188619) 2024-12-01T17:09:02,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742104_1280 (size=127628) 2024-12-01T17:09:02,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742104_1280 (size=127628) 2024-12-01T17:09:02,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742104_1280 (size=127628) 2024-12-01T17:09:02,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742105_1281 (size=6424742) 2024-12-01T17:09:02,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742105_1281 (size=6424742) 2024-12-01T17:09:02,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742105_1281 (size=6424742) 2024-12-01T17:09:02,116 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
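The "No job jar file set" warning above comes from JobResourceUploader because the export job is configured without a jar of its own; the long run of TableMapReduceUtil "using jar" lines shows the required HBase and Hadoop classes being shipped via the distributed cache instead, so the warning is generally harmless in this test. As a hedged sketch of how a job jar would normally be declared (class and job names here are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "example-job");   // hypothetical job name
        // Point the framework at the jar containing this class so mappers can load
        // user classes and the "No job jar file set" warning is not emitted.
        job.setJarByClass(JobJarSketch.class);
        // ... configure mapper/reducer and input/output, then job.waitForCompletion(true)
      }
    }
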
2024-12-01T17:09:02,118 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-01T17:09:02,119 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-01T17:09:02,120 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-01T17:09:02,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742106_1282 (size=441) 2024-12-01T17:09:02,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742106_1282 (size=441) 2024-12-01T17:09:02,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742106_1282 (size=441) 2024-12-01T17:09:02,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742107_1283 (size=21) 2024-12-01T17:09:02,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742107_1283 (size=21) 2024-12-01T17:09:02,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742107_1283 (size=21) 2024-12-01T17:09:02,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742108_1284 (size=304130) 2024-12-01T17:09:02,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742108_1284 (size=304130) 2024-12-01T17:09:02,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742108_1284 (size=304130) 2024-12-01T17:09:02,169 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-01T17:09:02,169 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-01T17:09:02,778 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0005_000001 (auth:SIMPLE) from 127.0.0.1:53168 2024-12-01T17:09:03,992 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0004/container_1733072744857_0004_01_000001/launch_container.sh] 2024-12-01T17:09:03,992 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0004/container_1733072744857_0004_01_000001/container_tokens] 2024-12-01T17:09:03,992 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0004/container_1733072744857_0004_01_000001/sysfs] 2024-12-01T17:09:05,859 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T17:09:07,306 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0005_000001 (auth:SIMPLE) from 127.0.0.1:52666 2024-12-01T17:09:07,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742109_1285 (size=349828) 2024-12-01T17:09:07,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742109_1285 (size=349828) 2024-12-01T17:09:07,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742109_1285 (size=349828) 2024-12-01T17:09:09,530 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0005_000001 (auth:SIMPLE) from 127.0.0.1:53172 2024-12-01T17:09:09,530 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0005_000001 (auth:SIMPLE) from 127.0.0.1:35336 2024-12-01T17:09:14,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742110_1286 (size=22235) 2024-12-01T17:09:14,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742110_1286 (size=22235) 2024-12-01T17:09:14,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742110_1286 (size=22235) 2024-12-01T17:09:14,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742111_1287 (size=462) 
2024-12-01T17:09:14,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742111_1287 (size=462) 2024-12-01T17:09:14,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742111_1287 (size=462) 2024-12-01T17:09:15,008 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_2/usercache/jenkins/appcache/application_1733072744857_0005/container_1733072744857_0005_01_000003/launch_container.sh] 2024-12-01T17:09:15,008 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_2/usercache/jenkins/appcache/application_1733072744857_0005/container_1733072744857_0005_01_000003/container_tokens] 2024-12-01T17:09:15,008 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_2/usercache/jenkins/appcache/application_1733072744857_0005/container_1733072744857_0005_01_000003/sysfs] 2024-12-01T17:09:15,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742112_1288 (size=22235) 2024-12-01T17:09:15,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742112_1288 (size=22235) 2024-12-01T17:09:15,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742112_1288 (size=22235) 2024-12-01T17:09:15,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742113_1289 (size=349828) 2024-12-01T17:09:15,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742113_1289 (size=349828) 2024-12-01T17:09:15,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742113_1289 (size=349828) 2024-12-01T17:09:17,561 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-01T17:09:17,561 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
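At this point the first export's MapReduce job has finished and ExportSnapshot finalizes and re-verifies the copied snapshot; the second, consecutive export to the same local destination begins right after (the repeated tgtDir/srcDir lines below). A rough sketch of driving the same tool programmatically through its Tool interface, roughly as the test does; the option spellings follow the current ExportSnapshot usage text and the destination path is a placeholder, so verify both against your HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copy the completed snapshot to a local filesystem root, mirroring the
        // local-export-* destination used by TestExportSnapshot.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snaptb0-testConsecutiveExports",
            "--copy-to", "file:///tmp/local-export"   // placeholder destination
        });
        System.exit(rc);
      }
    }
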
2024-12-01T17:09:17,581 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-01T17:09:17,581 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-01T17:09:17,582 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-01T17:09:17,582 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-01T17:09:17,585 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-01T17:09:17,586 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-01T17:09:17,586 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@767e74a0 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-01T17:09:17,586 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-01T17:09:17,586 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-01T17:09:17,588 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830, srcFsUri=hdfs://localhost:44451, srcDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:17,623 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44451, inputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:17,623 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@767e74a0, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-01T17:09:17,625 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-01T17:09:17,642 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-01T17:09:17,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:17,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:17,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:18,634 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_1/usercache/jenkins/appcache/application_1733072744857_0005/container_1733072744857_0005_01_000002/launch_container.sh] 2024-12-01T17:09:18,634 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_1/usercache/jenkins/appcache/application_1733072744857_0005/container_1733072744857_0005_01_000002/container_tokens] 2024-12-01T17:09:18,634 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_1/usercache/jenkins/appcache/application_1733072744857_0005/container_1733072744857_0005_01_000002/sysfs] 2024-12-01T17:09:18,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-4891859373971989523.jar 2024-12-01T17:09:18,826 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:18,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:18,929 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-6327671437308265061.jar 2024-12-01T17:09:18,930 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:18,930 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:18,930 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:18,931 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:18,931 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:18,931 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:18,931 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-01T17:09:18,932 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-01T17:09:18,932 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-01T17:09:18,932 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-01T17:09:18,933 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-01T17:09:18,933 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-01T17:09:18,933 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-01T17:09:18,935 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-01T17:09:18,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-01T17:09:18,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-01T17:09:18,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-01T17:09:18,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:09:18,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:09:18,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:09:18,938 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:09:18,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:09:18,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:09:18,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:09:19,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742114_1290 (size=24020) 2024-12-01T17:09:19,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742114_1290 (size=24020) 2024-12-01T17:09:19,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742114_1290 (size=24020) 2024-12-01T17:09:19,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742115_1291 (size=77755) 2024-12-01T17:09:19,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742115_1291 (size=77755) 2024-12-01T17:09:19,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742115_1291 (size=77755) 2024-12-01T17:09:19,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742116_1292 (size=131360) 2024-12-01T17:09:19,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742116_1292 (size=131360) 2024-12-01T17:09:19,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742116_1292 (size=131360) 2024-12-01T17:09:19,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742117_1293 (size=111793) 2024-12-01T17:09:19,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742117_1293 (size=111793) 2024-12-01T17:09:19,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742117_1293 (size=111793) 2024-12-01T17:09:19,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742118_1294 (size=1832290) 2024-12-01T17:09:19,554 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742118_1294 (size=1832290) 2024-12-01T17:09:19,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742118_1294 (size=1832290) 2024-12-01T17:09:20,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742119_1295 (size=8360005) 2024-12-01T17:09:20,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742119_1295 (size=8360005) 2024-12-01T17:09:20,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742119_1295 (size=8360005) 2024-12-01T17:09:20,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742120_1296 (size=503880) 2024-12-01T17:09:20,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742120_1296 (size=503880) 2024-12-01T17:09:20,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742120_1296 (size=503880) 2024-12-01T17:09:20,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742121_1297 (size=322274) 2024-12-01T17:09:20,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742121_1297 (size=322274) 2024-12-01T17:09:20,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742121_1297 (size=322274) 2024-12-01T17:09:20,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742122_1298 (size=20406) 2024-12-01T17:09:20,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742122_1298 (size=20406) 2024-12-01T17:09:20,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742122_1298 (size=20406) 2024-12-01T17:09:20,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742123_1299 (size=45609) 2024-12-01T17:09:20,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742123_1299 (size=45609) 2024-12-01T17:09:20,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742123_1299 (size=45609) 2024-12-01T17:09:20,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742124_1300 (size=6424742) 2024-12-01T17:09:20,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742124_1300 (size=6424742) 2024-12-01T17:09:20,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742124_1300 (size=6424742) 2024-12-01T17:09:20,758 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742125_1301 (size=136454) 2024-12-01T17:09:20,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742125_1301 (size=136454) 2024-12-01T17:09:20,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742125_1301 (size=136454) 2024-12-01T17:09:20,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742126_1302 (size=1597136) 2024-12-01T17:09:20,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742126_1302 (size=1597136) 2024-12-01T17:09:20,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742126_1302 (size=1597136) 2024-12-01T17:09:20,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742127_1303 (size=30873) 2024-12-01T17:09:20,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742127_1303 (size=30873) 2024-12-01T17:09:20,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742127_1303 (size=30873) 2024-12-01T17:09:21,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742128_1304 (size=29229) 2024-12-01T17:09:21,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742128_1304 (size=29229) 2024-12-01T17:09:21,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742128_1304 (size=29229) 2024-12-01T17:09:21,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742129_1305 (size=903864) 2024-12-01T17:09:21,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742129_1305 (size=903864) 2024-12-01T17:09:21,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742129_1305 (size=903864) 2024-12-01T17:09:21,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742130_1306 (size=5175431) 2024-12-01T17:09:21,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742130_1306 (size=5175431) 2024-12-01T17:09:21,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742130_1306 (size=5175431) 2024-12-01T17:09:21,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742131_1307 (size=232881) 2024-12-01T17:09:21,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742131_1307 (size=232881) 2024-12-01T17:09:21,316 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742131_1307 (size=232881) 2024-12-01T17:09:21,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742132_1308 (size=1323991) 2024-12-01T17:09:21,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742132_1308 (size=1323991) 2024-12-01T17:09:21,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742132_1308 (size=1323991) 2024-12-01T17:09:21,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742133_1309 (size=4695811) 2024-12-01T17:09:21,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742133_1309 (size=4695811) 2024-12-01T17:09:21,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742133_1309 (size=4695811) 2024-12-01T17:09:21,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742134_1310 (size=1877034) 2024-12-01T17:09:21,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742134_1310 (size=1877034) 2024-12-01T17:09:21,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742134_1310 (size=1877034) 2024-12-01T17:09:21,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742135_1311 (size=217555) 2024-12-01T17:09:21,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742135_1311 (size=217555) 2024-12-01T17:09:21,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742135_1311 (size=217555) 2024-12-01T17:09:21,801 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0005_000001 (auth:SIMPLE) from 127.0.0.1:52460 2024-12-01T17:09:21,815 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0005/container_1733072744857_0005_01_000001/launch_container.sh] 2024-12-01T17:09:21,816 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0005/container_1733072744857_0005_01_000001/container_tokens] 2024-12-01T17:09:21,816 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0005/container_1733072744857_0005_01_000001/sysfs] 2024-12-01T17:09:21,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742136_1312 (size=440956) 2024-12-01T17:09:21,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742136_1312 (size=440956) 2024-12-01T17:09:21,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742136_1312 (size=440956) 2024-12-01T17:09:21,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742137_1313 (size=4188619) 2024-12-01T17:09:21,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742137_1313 (size=4188619) 2024-12-01T17:09:21,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742137_1313 (size=4188619) 2024-12-01T17:09:21,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742138_1314 (size=127628) 2024-12-01T17:09:21,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742138_1314 (size=127628) 2024-12-01T17:09:21,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742138_1314 (size=127628) 2024-12-01T17:09:21,885 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
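[editor's note] The TableMapReduceUtil lines above ("For class X, using jar Y") and the JobResourceUploader warning ("No job jar file set") both come from ordinary MapReduce job setup against HBase. The following is a minimal, hypothetical sketch of that setup, not the test's actual code: the class name JobSetupSketch, the job name, and the output path are assumptions introduced only for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class JobSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "example-hbase-mr-job");

    // Without this (or Job#setJar), JobResourceUploader warns
    // "No job jar file set. User classes may not be found." as above.
    job.setJarByClass(JobSetupSketch.class);

    // Mapper/reducer wiring omitted; only the classes that appear in the
    // dependency-jar DEBUG lines above are configured here.
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(Text.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    FileOutputFormat.setOutputPath(job, new Path("/tmp/example-output")); // assumed path

    // Ships the jars backing the job's key/value/format classes to the
    // distributed cache; this is the step that emits the
    // "For class X, using jar Y" DEBUG lines.
    TableMapReduceUtil.addDependencyJars(job);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}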
2024-12-01T17:09:21,888 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-01T17:09:21,892 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-01T17:09:21,892 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-01T17:09:21,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742139_1315 (size=441) 2024-12-01T17:09:21,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742139_1315 (size=441) 2024-12-01T17:09:21,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742139_1315 (size=441) 2024-12-01T17:09:21,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742140_1316 (size=21) 2024-12-01T17:09:21,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742140_1316 (size=21) 2024-12-01T17:09:21,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742140_1316 (size=21) 2024-12-01T17:09:22,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742141_1317 (size=304128) 2024-12-01T17:09:22,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742141_1317 (size=304128) 2024-12-01T17:09:22,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742141_1317 (size=304128) 2024-12-01T17:09:22,058 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-01T17:09:22,058 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-01T17:09:22,768 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0006_000001 (auth:SIMPLE) from 127.0.0.1:48478 2024-12-01T17:09:27,313 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0006_000001 (auth:SIMPLE) from 127.0.0.1:42936 2024-12-01T17:09:27,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742142_1318 (size=349826) 2024-12-01T17:09:27,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742142_1318 (size=349826) 2024-12-01T17:09:27,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742142_1318 (size=349826) 2024-12-01T17:09:29,500 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0006_000001 (auth:SIMPLE) from 127.0.0.1:60412 2024-12-01T17:09:29,501 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0006_000001 (auth:SIMPLE) from 127.0.0.1:48494 2024-12-01T17:09:32,806 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0006/container_1733072744857_0006_01_000002/launch_container.sh] 2024-12-01T17:09:32,807 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0006/container_1733072744857_0006_01_000002/container_tokens] 2024-12-01T17:09:32,807 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0006/container_1733072744857_0006_01_000002/sysfs] 2024-12-01T17:09:33,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742143_1319 (size=21186) 2024-12-01T17:09:33,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742143_1319 (size=21186) 2024-12-01T17:09:33,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742143_1319 (size=21186) 2024-12-01T17:09:33,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742144_1320 (size=462) 2024-12-01T17:09:33,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742144_1320 (size=462) 2024-12-01T17:09:33,462 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742144_1320 (size=462) 2024-12-01T17:09:33,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742145_1321 (size=21186) 2024-12-01T17:09:33,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742145_1321 (size=21186) 2024-12-01T17:09:33,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742145_1321 (size=21186) 2024-12-01T17:09:33,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742146_1322 (size=349826) 2024-12-01T17:09:33,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742146_1322 (size=349826) 2024-12-01T17:09:33,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742146_1322 (size=349826) 2024-12-01T17:09:33,530 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0006_000001 (auth:SIMPLE) from 127.0.0.1:36110 2024-12-01T17:09:33,549 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0006/container_1733072744857_0006_01_000003/sysfs] 2024-12-01T17:09:35,408 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-01T17:09:35,408 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
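[editor's note] The ExportSnapshot lines above (loading the 'snaptb0-testConsecutiveExports' hfile list, planning two splits, then "Finalize the Snapshot Export" and the integrity check) correspond to one run of the ExportSnapshot tool. Below is a rough sketch of driving that tool programmatically; the destination URI is an assumption, and the flag spellings follow the documented long-option form for recent HBase releases rather than this test's exact invocation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Copies the snapshot manifest and the referenced hfiles to the target
    // filesystem with a MapReduce job (one mapper per planned split).
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testConsecutiveExports", // snapshot name seen in this log
        "--copy-to", "file:///tmp/local-export",        // assumed destination
        "--overwrite"
    });
    System.exit(rc);
  }
}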
2024-12-01T17:09:35,411 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-01T17:09:35,411 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-01T17:09:35,411 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-01T17:09:35,411 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-01T17:09:35,413 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-01T17:09:35,413 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-01T17:09:35,413 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@767e74a0 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-01T17:09:35,413 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-01T17:09:35,413 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733072940830/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-01T17:09:35,433 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testConsecutiveExports 2024-12-01T17:09:35,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-01T17:09:35,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-01T17:09:35,439 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072975438"}]},"ts":"1733072975438"} 2024-12-01T17:09:35,441 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-01T17:09:35,441 INFO [PEWorker-1 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-01T17:09:35,442 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-01T17:09:35,444 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5429f20942a88aa4e36d8f0081a686f8, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=eb34f7e309a981aebf4709e257d001d6, UNASSIGN}] 2024-12-01T17:09:35,445 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5429f20942a88aa4e36d8f0081a686f8, UNASSIGN 2024-12-01T17:09:35,446 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=eb34f7e309a981aebf4709e257d001d6, UNASSIGN 2024-12-01T17:09:35,447 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=5429f20942a88aa4e36d8f0081a686f8, regionState=CLOSING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:09:35,447 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=eb34f7e309a981aebf4709e257d001d6, regionState=CLOSING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:09:35,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5429f20942a88aa4e36d8f0081a686f8, UNASSIGN because future has completed 2024-12-01T17:09:35,452 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:09:35,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5429f20942a88aa4e36d8f0081a686f8, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:09:35,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=eb34f7e309a981aebf4709e257d001d6, UNASSIGN because future has completed 2024-12-01T17:09:35,454 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:09:35,455 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure eb34f7e309a981aebf4709e257d001d6, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:09:35,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-01T17:09:35,605 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:09:35,605 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:09:35,606 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 5429f20942a88aa4e36d8f0081a686f8, disabling compactions & flushes 2024-12-01T17:09:35,606 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:09:35,606 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:09:35,606 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. after waiting 0 ms 2024-12-01T17:09:35,606 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:09:35,608 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:09:35,608 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:09:35,608 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing eb34f7e309a981aebf4709e257d001d6, disabling compactions & flushes 2024-12-01T17:09:35,609 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 2024-12-01T17:09:35,609 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 2024-12-01T17:09:35,609 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. after waiting 0 ms 2024-12-01T17:09:35,609 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 
2024-12-01T17:09:35,611 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:09:35,612 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:09:35,612 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8. 2024-12-01T17:09:35,612 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 5429f20942a88aa4e36d8f0081a686f8: Waiting for close lock at 1733072975606Running coprocessor pre-close hooks at 1733072975606Disabling compacts and flushes for region at 1733072975606Disabling writes for close at 1733072975606Writing region close event to WAL at 1733072975607 (+1 ms)Running coprocessor post-close hooks at 1733072975612 (+5 ms)Closed at 1733072975612 2024-12-01T17:09:35,614 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:09:35,615 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=5429f20942a88aa4e36d8f0081a686f8, regionState=CLOSED 2024-12-01T17:09:35,618 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:09:35,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5429f20942a88aa4e36d8f0081a686f8, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:09:35,619 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:09:35,619 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6. 
2024-12-01T17:09:35,620 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for eb34f7e309a981aebf4709e257d001d6: Waiting for close lock at 1733072975608Running coprocessor pre-close hooks at 1733072975608Disabling compacts and flushes for region at 1733072975608Disabling writes for close at 1733072975609 (+1 ms)Writing region close event to WAL at 1733072975609Running coprocessor post-close hooks at 1733072975619 (+10 ms)Closed at 1733072975619 2024-12-01T17:09:35,622 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:09:35,624 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=eb34f7e309a981aebf4709e257d001d6, regionState=CLOSED 2024-12-01T17:09:35,625 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-12-01T17:09:35,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure eb34f7e309a981aebf4709e257d001d6, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:09:35,626 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 5429f20942a88aa4e36d8f0081a686f8, server=b87883a7882d,36521,1733072737896 in 169 msec 2024-12-01T17:09:35,627 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5429f20942a88aa4e36d8f0081a686f8, UNASSIGN in 181 msec 2024-12-01T17:09:35,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=128 2024-12-01T17:09:35,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure eb34f7e309a981aebf4709e257d001d6, server=b87883a7882d,44753,1733072737741 in 173 msec 2024-12-01T17:09:35,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=126 2024-12-01T17:09:35,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=eb34f7e309a981aebf4709e257d001d6, UNASSIGN in 185 msec 2024-12-01T17:09:35,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-01T17:09:35,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 191 msec 2024-12-01T17:09:35,638 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072975638"}]},"ts":"1733072975638"} 2024-12-01T17:09:35,640 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-01T17:09:35,640 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 
2024-12-01T17:09:35,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 208 msec 2024-12-01T17:09:35,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-01T17:09:35,750 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-01T17:09:35,751 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testConsecutiveExports 2024-12-01T17:09:35,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-01T17:09:35,753 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-01T17:09:35,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-01T17:09:35,754 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-01T17:09:35,756 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-01T17:09:35,759 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:09:35,759 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:09:35,762 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/recovered.edits] 2024-12-01T17:09:35,762 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/recovered.edits] 2024-12-01T17:09:35,766 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/cf/8f3fd29c0a474cdea05f62f2f7f6821f to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/cf/8f3fd29c0a474cdea05f62f2f7f6821f 2024-12-01T17:09:35,766 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/cf/6476ed675d2742999963d96b4f192969 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/cf/6476ed675d2742999963d96b4f192969 2024-12-01T17:09:35,769 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6/recovered.edits/9.seqid 2024-12-01T17:09:35,770 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8/recovered.edits/9.seqid 2024-12-01T17:09:35,770 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/eb34f7e309a981aebf4709e257d001d6 2024-12-01T17:09:35,770 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testConsecutiveExports/5429f20942a88aa4e36d8f0081a686f8 2024-12-01T17:09:35,770 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-01T17:09:35,773 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-01T17:09:35,777 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-01T17:09:35,780 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-01T17:09:35,781 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-01T17:09:35,782 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
2024-12-01T17:09:35,782 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072975782"}]},"ts":"9223372036854775807"} 2024-12-01T17:09:35,782 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072975782"}]},"ts":"9223372036854775807"} 2024-12-01T17:09:35,784 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-01T17:09:35,784 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5429f20942a88aa4e36d8f0081a686f8, NAME => 'testtb-testConsecutiveExports,,1733072935202.5429f20942a88aa4e36d8f0081a686f8.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => eb34f7e309a981aebf4709e257d001d6, NAME => 'testtb-testConsecutiveExports,1,1733072935202.eb34f7e309a981aebf4709e257d001d6.', STARTKEY => '1', ENDKEY => ''}] 2024-12-01T17:09:35,784 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-12-01T17:09:35,785 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733072975784"}]},"ts":"9223372036854775807"} 2024-12-01T17:09:35,787 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-01T17:09:35,788 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-01T17:09:35,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 37 msec 2024-12-01T17:09:35,859 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
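[editor's note] The teardown that runs from the DisableTableProcedure above through the DeleteTableProcedure, and the snapshot deletions logged just below, map onto a short sequence of client-side Admin calls. A hedged sketch of that sequence follows; the connection setup is an assumption and the class name TeardownSketch is introduced only for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TeardownSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testConsecutiveExports");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Drives DisableTableProcedure: regions are closed/unassigned and the
      // table state in hbase:meta flips to DISABLED, as logged above.
      admin.disableTable(table);

      // Drives DeleteTableProcedure: region directories are archived, the
      // meta rows and descriptor removed, and the ACL znode cleaned up.
      admin.deleteTable(table);

      // Removes the snapshot manifests under .hbase-snapshot (the two
      // "delete name: ..." requests logged shortly after this point).
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}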
2024-12-01T17:09:35,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-01T17:09:35,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-01T17:09:35,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-01T17:09:35,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-01T17:09:35,909 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-01T17:09:35,909 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-01T17:09:35,909 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-01T17:09:35,909 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-01T17:09:35,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-01T17:09:35,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-01T17:09:35,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-01T17:09:35,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:35,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:35,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:35,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-01T17:09:35,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:35,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-01T17:09:35,920 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-01T17:09:35,921 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-01T17:09:35,927 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-01T17:09:35,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-01T17:09:35,931 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-01T17:09:35,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-01T17:09:35,957 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=808 (was 810), OpenFileDescriptor=809 (was 813), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=748 (was 723) - SystemLoadAverage LEAK? 
-, ProcessCount=19 (was 22), AvailableMemoryMB=3280 (was 3487) 2024-12-01T17:09:35,957 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-12-01T17:09:35,976 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=808, OpenFileDescriptor=809, MaxFileDescriptor=1048576, SystemLoadAverage=748, ProcessCount=19, AvailableMemoryMB=3279 2024-12-01T17:09:35,976 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-12-01T17:09:35,978 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:09:35,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:35,981 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:09:35,981 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:35,981 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-12-01T17:09:35,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-01T17:09:35,982 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:09:36,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742147_1323 (size=422) 2024-12-01T17:09:36,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742147_1323 (size=422) 2024-12-01T17:09:36,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742147_1323 (size=422) 2024-12-01T17:09:36,005 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3b1abb99d0c92014cf2359f8bca13e19, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => 
{REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:36,005 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 9c76487e5a72f5c3f3d5290677627bc3, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:36,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742148_1324 (size=83) 2024-12-01T17:09:36,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742148_1324 (size=83) 2024-12-01T17:09:36,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742149_1325 (size=83) 2024-12-01T17:09:36,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742149_1325 (size=83) 2024-12-01T17:09:36,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742148_1324 (size=83) 2024-12-01T17:09:36,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742149_1325 (size=83) 2024-12-01T17:09:36,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:36,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:36,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 9c76487e5a72f5c3f3d5290677627bc3, disabling compactions & flushes 2024-12-01T17:09:36,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 
3b1abb99d0c92014cf2359f8bca13e19, disabling compactions & flushes 2024-12-01T17:09:36,013 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:36,013 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 2024-12-01T17:09:36,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:36,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 2024-12-01T17:09:36,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. after waiting 0 ms 2024-12-01T17:09:36,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. after waiting 0 ms 2024-12-01T17:09:36,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:36,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 2024-12-01T17:09:36,013 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:36,013 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 
2024-12-01T17:09:36,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3b1abb99d0c92014cf2359f8bca13e19: Waiting for close lock at 1733072976013Disabling compacts and flushes for region at 1733072976013Disabling writes for close at 1733072976013Writing region close event to WAL at 1733072976013Closed at 1733072976013 2024-12-01T17:09:36,013 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 9c76487e5a72f5c3f3d5290677627bc3: Waiting for close lock at 1733072976013Disabling compacts and flushes for region at 1733072976013Disabling writes for close at 1733072976013Writing region close event to WAL at 1733072976013Closed at 1733072976013 2024-12-01T17:09:36,014 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:09:36,015 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733072976014"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072976014"}]},"ts":"1733072976014"} 2024-12-01T17:09:36,015 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733072976014"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072976014"}]},"ts":"1733072976014"} 2024-12-01T17:09:36,017 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-01T17:09:36,018 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:09:36,018 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072976018"}]},"ts":"1733072976018"} 2024-12-01T17:09:36,021 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-01T17:09:36,021 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:09:36,022 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:09:36,022 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:09:36,022 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:09:36,022 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:09:36,022 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:09:36,022 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:09:36,022 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:09:36,022 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:09:36,022 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:09:36,022 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:09:36,023 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=3b1abb99d0c92014cf2359f8bca13e19, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c76487e5a72f5c3f3d5290677627bc3, ASSIGN}] 2024-12-01T17:09:36,024 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=3b1abb99d0c92014cf2359f8bca13e19, ASSIGN 2024-12-01T17:09:36,024 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c76487e5a72f5c3f3d5290677627bc3, ASSIGN 2024-12-01T17:09:36,025 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=3b1abb99d0c92014cf2359f8bca13e19, ASSIGN; state=OFFLINE, location=b87883a7882d,44753,1733072737741; forceNewPlan=false, retain=false 
2024-12-01T17:09:36,025 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c76487e5a72f5c3f3d5290677627bc3, ASSIGN; state=OFFLINE, location=b87883a7882d,41375,1733072737967; forceNewPlan=false, retain=false 2024-12-01T17:09:36,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-01T17:09:36,175 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-01T17:09:36,176 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=9c76487e5a72f5c3f3d5290677627bc3, regionState=OPENING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:09:36,176 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=3b1abb99d0c92014cf2359f8bca13e19, regionState=OPENING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:09:36,178 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=3b1abb99d0c92014cf2359f8bca13e19, ASSIGN because future has completed 2024-12-01T17:09:36,178 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:09:36,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c76487e5a72f5c3f3d5290677627bc3, ASSIGN because future has completed 2024-12-01T17:09:36,179 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:09:36,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-01T17:09:36,333 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 2024-12-01T17:09:36,333 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 3b1abb99d0c92014cf2359f8bca13e19, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:09:36,334 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 
service=AccessControlService 2024-12-01T17:09:36,334 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:09:36,334 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:36,334 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => 9c76487e5a72f5c3f3d5290677627bc3, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T17:09:36,334 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,334 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:36,334 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. service=AccessControlService 2024-12-01T17:09:36,334 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,334 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,335 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:09:36,335 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,335 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:36,335 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,335 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,336 INFO [StoreOpener-9c76487e5a72f5c3f3d5290677627bc3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,336 INFO [StoreOpener-3b1abb99d0c92014cf2359f8bca13e19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,337 INFO [StoreOpener-3b1abb99d0c92014cf2359f8bca13e19-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3b1abb99d0c92014cf2359f8bca13e19 columnFamilyName cf 2024-12-01T17:09:36,337 INFO [StoreOpener-9c76487e5a72f5c3f3d5290677627bc3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9c76487e5a72f5c3f3d5290677627bc3 columnFamilyName cf 2024-12-01T17:09:36,337 DEBUG [StoreOpener-3b1abb99d0c92014cf2359f8bca13e19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:36,337 DEBUG [StoreOpener-9c76487e5a72f5c3f3d5290677627bc3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:36,338 INFO [StoreOpener-3b1abb99d0c92014cf2359f8bca13e19-1 {}] regionserver.HStore(327): Store=3b1abb99d0c92014cf2359f8bca13e19/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:09:36,338 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,338 INFO [StoreOpener-9c76487e5a72f5c3f3d5290677627bc3-1 {}] regionserver.HStore(327): Store=9c76487e5a72f5c3f3d5290677627bc3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:09:36,338 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,339 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,339 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,339 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,339 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,340 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,340 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,341 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,341 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 3b1abb99d0c92014cf2359f8bca13e19 
2024-12-01T17:09:36,341 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,344 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,346 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:09:36,346 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:09:36,347 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened 9c76487e5a72f5c3f3d5290677627bc3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63660164, jitterRate=-0.05138963460922241}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:09:36,347 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,347 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 3b1abb99d0c92014cf2359f8bca13e19; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69187017, jitterRate=0.03096689283847809}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:09:36,347 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,347 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 3b1abb99d0c92014cf2359f8bca13e19: Running coprocessor pre-open hook at 1733072976334Writing region info on filesystem at 1733072976335 (+1 ms)Initializing all the Stores at 1733072976335Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072976335Cleaning up temporary data from old regions at 1733072976341 (+6 ms)Running coprocessor post-open hooks at 1733072976347 (+6 ms)Region opened successfully at 1733072976347 2024-12-01T17:09:36,347 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for 9c76487e5a72f5c3f3d5290677627bc3: Running 
coprocessor pre-open hook at 1733072976335Writing region info on filesystem at 1733072976335Initializing all the Stores at 1733072976335Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072976335Cleaning up temporary data from old regions at 1733072976339 (+4 ms)Running coprocessor post-open hooks at 1733072976347 (+8 ms)Region opened successfully at 1733072976347 2024-12-01T17:09:36,350 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3., pid=136, masterSystemTime=1733072976330 2024-12-01T17:09:36,352 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:36,352 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:36,352 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19., pid=135, masterSystemTime=1733072976330 2024-12-01T17:09:36,353 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=9c76487e5a72f5c3f3d5290677627bc3, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:09:36,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:09:36,356 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 2024-12-01T17:09:36,356 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 
2024-12-01T17:09:36,356 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=3b1abb99d0c92014cf2359f8bca13e19, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:09:36,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:09:36,364 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=134 2024-12-01T17:09:36,364 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3, server=b87883a7882d,41375,1733072737967 in 182 msec 2024-12-01T17:09:36,366 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=133 2024-12-01T17:09:36,366 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19, server=b87883a7882d,44753,1733072737741 in 185 msec 2024-12-01T17:09:36,366 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c76487e5a72f5c3f3d5290677627bc3, ASSIGN in 341 msec 2024-12-01T17:09:36,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=132 2024-12-01T17:09:36,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=3b1abb99d0c92014cf2359f8bca13e19, ASSIGN in 343 msec 2024-12-01T17:09:36,369 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:09:36,369 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072976369"}]},"ts":"1733072976369"} 2024-12-01T17:09:36,371 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-01T17:09:36,372 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:09:36,372 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-01T17:09:36,377 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-01T17:09:36,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:36,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:36,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:36,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:36,441 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:36,441 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:36,441 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:36,441 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:36,443 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 463 msec 2024-12-01T17:09:36,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-01T17:09:36,610 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-01T17:09:36,610 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-12-01T17:09:36,610 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:09:36,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 
2024-12-01T17:09:36,614 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:09:36,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-12-01T17:09:36,614 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-01T17:09:36,617 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-01T17:09:36,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072976617 (current time:1733072976617). 2024-12-01T17:09:36,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:09:36,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-01T17:09:36,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:09:36,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ab27615, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:36,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:09:36,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:09:36,619 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:09:36,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:09:36,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:09:36,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50311d99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:36,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:09:36,620 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:09:36,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:36,620 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58566, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:09:36,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c8706b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:36,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:09:36,622 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:09:36,622 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:36,623 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50724, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:09:36,624 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 
2024-12-01T17:09:36,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:09:36,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:36,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:36,624 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:09:36,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b1c80c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:36,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:09:36,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:09:36,626 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:09:36,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:09:36,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:09:36,627 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fa0c148, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:36,627 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:09:36,627 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:09:36,627 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:36,628 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58590, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:09:36,628 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ea8ba85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:36,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:09:36,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:09:36,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:36,631 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50732, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:09:36,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:09:36,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:36,634 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38330, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:09:36,635 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 
2024-12-01T17:09:36,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:09:36,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:36,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:36,636 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:09:36,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-01T17:09:36,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-01T17:09:36,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-01T17:09:36,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-01T17:09:36,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-01T17:09:36,639 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:09:36,640 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:09:36,642 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:09:36,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742150_1326 (size=215) 2024-12-01T17:09:36,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742150_1326 (size=215) 2024-12-01T17:09:36,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742150_1326 (size=215) 2024-12-01T17:09:36,654 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:09:36,654 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3}] 2024-12-01T17:09:36,660 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,660 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-01T17:09:36,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-12-01T17:09:36,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41375 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-12-01T17:09:36,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:36,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 2024-12-01T17:09:36,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for 9c76487e5a72f5c3f3d5290677627bc3: 2024-12-01T17:09:36,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-01T17:09:36,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 3b1abb99d0c92014cf2359f8bca13e19: 2024-12-01T17:09:36,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-01T17:09:36,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:36,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:36,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:09:36,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:09:36,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:09:36,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:09:36,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742151_1327 (size=86) 2024-12-01T17:09:36,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742151_1327 (size=86) 2024-12-01T17:09:36,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742151_1327 (size=86) 2024-12-01T17:09:36,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 2024-12-01T17:09:36,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-01T17:09:36,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-12-01T17:09:36,837 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,837 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:36,839 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19 in 184 msec 2024-12-01T17:09:36,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742152_1328 (size=86) 2024-12-01T17:09:36,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742152_1328 (size=86) 2024-12-01T17:09:36,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742152_1328 (size=86) 2024-12-01T17:09:36,852 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 
2024-12-01T17:09:36,852 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-01T17:09:36,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-12-01T17:09:36,853 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,853 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:36,855 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=139, resume processing ppid=137 2024-12-01T17:09:36,855 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:09:36,855 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3 in 200 msec 2024-12-01T17:09:36,856 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:09:36,857 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:09:36,857 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:36,857 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:36,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742153_1329 (size=597) 2024-12-01T17:09:36,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742153_1329 (size=597) 2024-12-01T17:09:36,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742153_1329 (size=597) 2024-12-01T17:09:36,866 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:09:36,871 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:09:36,872 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:36,873 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:09:36,874 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-01T17:09:36,875 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 237 msec 2024-12-01T17:09:36,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-01T17:09:36,960 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-01T17:09:36,965 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='04ac35dd02a48b467ca26479d5008c8b3', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:09:36,970 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='15a2cb719f2f4b5a09a627726a51846fc', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:09:36,971 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='36d281af51444155c67ae10807b28abad', locateType=CURRENT is 
[region=testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:09:36,972 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='2f97744e020be22b2348636aec3220c3d', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:09:36,977 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='4458db28ba866cd80662f098fa3fef762', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:09:36,978 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='5e0e46b37744fc0adfbca7918b1578dda', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:09:36,979 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44753 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:09:36,980 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. with WAL disabled. Data may be lost in the event of a crash. 
2024-12-01T17:09:36,980 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='6433246534f000feee7d51c14fff2da82', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:09:36,981 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='c5b0433bc3514e0f67c4ad7f879bf6f9', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:09:36,981 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-01T17:09:36,982 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='fd71d02eb974d519b25c6c2154565277', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:09:36,985 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:36,985 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 2024-12-01T17:09:36,985 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:09:36,987 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-01T17:09:36,996 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-01T17:09:37,005 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-01T17:09:37,008 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-01T17:09:37,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072977008 (current time:1733072977008). 
2024-12-01T17:09:37,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:09:37,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-01T17:09:37,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:09:37,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d690fd9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:37,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:09:37,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:09:37,021 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:09:37,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:09:37,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:09:37,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@607d92cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:37,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:09:37,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:09:37,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:37,023 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58612, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:09:37,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48feb370, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:37,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:09:37,025 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:09:37,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:37,027 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50748, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:09:37,028 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:09:37,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:09:37,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:37,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:37,029 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:09:37,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54fc6fba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:37,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:09:37,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:09:37,030 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:09:37,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:09:37,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:09:37,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fa5df2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:37,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:09:37,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:09:37,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:37,032 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58632, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:09:37,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d798a80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:37,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:09:37,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:09:37,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:37,036 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50758, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-01T17:09:37,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:09:37,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:37,040 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38338, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:09:37,040 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:09:37,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:09:37,041 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:37,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:37,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-01T17:09:37,041 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:09:37,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:09:37,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-01T17:09:37,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-01T17:09:37,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-01T17:09:37,044 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:09:37,046 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:09:37,049 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:09:37,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742154_1330 (size=210) 2024-12-01T17:09:37,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742154_1330 (size=210) 2024-12-01T17:09:37,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742154_1330 (size=210) 2024-12-01T17:09:37,083 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:09:37,084 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3}] 2024-12-01T17:09:37,085 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:37,085 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:37,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-01T17:09:37,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-01T17:09:37,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41375 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-01T17:09:37,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:37,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 
2024-12-01T17:09:37,239 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing 9c76487e5a72f5c3f3d5290677627bc3 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-01T17:09:37,239 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 3b1abb99d0c92014cf2359f8bca13e19 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-01T17:09:37,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/.tmp/cf/c68493ac04634af8910f790f99c6e96a is 69, key is 04ac35dd02a48b467ca26479d5008c8b3/cf:q/1733072976979/Put/seqid=0 2024-12-01T17:09:37,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/.tmp/cf/fd2339b42bd641e8a82f56852d290a0a is 71, key is 176e02ed1c3705d894a60dbfbb076519/cf:q/1733072976979/Put/seqid=0 2024-12-01T17:09:37,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742156_1332 (size=5149) 2024-12-01T17:09:37,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742156_1332 (size=5149) 2024-12-01T17:09:37,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742156_1332 (size=5149) 2024-12-01T17:09:37,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742155_1331 (size=8462) 2024-12-01T17:09:37,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742155_1331 (size=8462) 2024-12-01T17:09:37,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742155_1331 (size=8462) 2024-12-01T17:09:37,266 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/.tmp/cf/c68493ac04634af8910f790f99c6e96a 2024-12-01T17:09:37,266 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/.tmp/cf/fd2339b42bd641e8a82f56852d290a0a 2024-12-01T17:09:37,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/.tmp/cf/c68493ac04634af8910f790f99c6e96a as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/cf/c68493ac04634af8910f790f99c6e96a 2024-12-01T17:09:37,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/.tmp/cf/fd2339b42bd641e8a82f56852d290a0a as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/cf/fd2339b42bd641e8a82f56852d290a0a 2024-12-01T17:09:37,275 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/cf/fd2339b42bd641e8a82f56852d290a0a, entries=49, sequenceid=6, filesize=8.3 K 2024-12-01T17:09:37,275 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/cf/c68493ac04634af8910f790f99c6e96a, entries=1, sequenceid=6, filesize=5.0 K 2024-12-01T17:09:37,275 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 3b1abb99d0c92014cf2359f8bca13e19 in 36ms, sequenceid=6, compaction requested=false 2024-12-01T17:09:37,275 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 9c76487e5a72f5c3f3d5290677627bc3 in 36ms, sequenceid=6, compaction requested=false 2024-12-01T17:09:37,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-01T17:09:37,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-01T17:09:37,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 3b1abb99d0c92014cf2359f8bca13e19: 2024-12-01T17:09:37,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for 9c76487e5a72f5c3f3d5290677627bc3: 
2024-12-01T17:09:37,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-01T17:09:37,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-01T17:09:37,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:37,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:37,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:09:37,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:09:37,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/cf/c68493ac04634af8910f790f99c6e96a] hfiles 2024-12-01T17:09:37,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/cf/fd2339b42bd641e8a82f56852d290a0a] hfiles 2024-12-01T17:09:37,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/cf/fd2339b42bd641e8a82f56852d290a0a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:37,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/cf/c68493ac04634af8910f790f99c6e96a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:37,289 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742157_1333 (size=125) 2024-12-01T17:09:37,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742158_1334 (size=125) 2024-12-01T17:09:37,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742157_1333 (size=125) 2024-12-01T17:09:37,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742157_1333 (size=125) 2024-12-01T17:09:37,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742158_1334 (size=125) 2024-12-01T17:09:37,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742158_1334 (size=125) 2024-12-01T17:09:37,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 2024-12-01T17:09:37,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:37,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-01T17:09:37,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-01T17:09:37,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-01T17:09:37,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-01T17:09:37,291 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:37,291 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:37,291 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:37,291 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:37,293 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19 in 209 msec 2024-12-01T17:09:37,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=142, resume 
processing ppid=140 2024-12-01T17:09:37,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3 in 209 msec 2024-12-01T17:09:37,294 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:09:37,294 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:09:37,295 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:09:37,295 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:37,295 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:37,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742159_1335 (size=675) 2024-12-01T17:09:37,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742159_1335 (size=675) 2024-12-01T17:09:37,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742159_1335 (size=675) 2024-12-01T17:09:37,305 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:09:37,309 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:09:37,309 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:37,310 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:09:37,311 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-01T17:09:37,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 269 msec 2024-12-01T17:09:37,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:37,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-01T17:09:37,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-01T17:09:37,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-01T17:09:37,360 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-01T17:09:37,364 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50696, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T17:09:37,364 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50768, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T17:09:37,364 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38344, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T17:09:37,366 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:09:37,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 
{}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:37,368 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:09:37,368 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:37,368 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-12-01T17:09:37,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-01T17:09:37,369 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:09:37,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742160_1336 (size=399) 2024-12-01T17:09:37,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742160_1336 (size=399) 2024-12-01T17:09:37,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742160_1336 (size=399) 2024-12-01T17:09:37,380 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 776a1b19012200a1457f3b003039fdac, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:37,381 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 45268bd4bd05ba72165dccc5445bee5a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:37,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742161_1337 (size=85) 2024-12-01T17:09:37,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742161_1337 (size=85) 2024-12-01T17:09:37,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742161_1337 (size=85) 2024-12-01T17:09:37,391 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:37,391 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 776a1b19012200a1457f3b003039fdac, disabling compactions & flushes 2024-12-01T17:09:37,392 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 2024-12-01T17:09:37,392 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 2024-12-01T17:09:37,392 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. after waiting 0 ms 2024-12-01T17:09:37,392 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 2024-12-01T17:09:37,392 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 
2024-12-01T17:09:37,392 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 776a1b19012200a1457f3b003039fdac: Waiting for close lock at 1733072977391Disabling compacts and flushes for region at 1733072977391Disabling writes for close at 1733072977392 (+1 ms)Writing region close event to WAL at 1733072977392Closed at 1733072977392 2024-12-01T17:09:37,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742162_1338 (size=85) 2024-12-01T17:09:37,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742162_1338 (size=85) 2024-12-01T17:09:37,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742162_1338 (size=85) 2024-12-01T17:09:37,399 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:37,399 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 45268bd4bd05ba72165dccc5445bee5a, disabling compactions & flushes 2024-12-01T17:09:37,399 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. 2024-12-01T17:09:37,399 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. 2024-12-01T17:09:37,399 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. after waiting 0 ms 2024-12-01T17:09:37,399 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. 2024-12-01T17:09:37,399 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. 
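
[editor's note] The entries above show CreateTableProcedure pid=143 laying out two regions split at row key '2' (STARTKEY ''/ENDKEY '2' and STARTKEY '2'/ENDKEY ''). Below is a minimal, illustrative client-side sketch of issuing such a pre-split create request through the HBase Admin API; it is not the test's actual code, and the connection setup and class name are assumptions.

// Hypothetical sketch: create a table with column family "cf", pre-split at "2".
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"));
      // One split key => two regions: ['', '2') and ['2', ''), as in the log above.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("2") };
      admin.createTable(td.build(), splitKeys); // blocks until the create procedure completes
    }
  }
}
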
2024-12-01T17:09:37,399 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 45268bd4bd05ba72165dccc5445bee5a: Waiting for close lock at 1733072977399Disabling compacts and flushes for region at 1733072977399Disabling writes for close at 1733072977399Writing region close event to WAL at 1733072977399Closed at 1733072977399 2024-12-01T17:09:37,400 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:09:37,400 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733072977400"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072977400"}]},"ts":"1733072977400"} 2024-12-01T17:09:37,400 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733072977400"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072977400"}]},"ts":"1733072977400"} 2024-12-01T17:09:37,402 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-01T17:09:37,403 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:09:37,403 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072977403"}]},"ts":"1733072977403"} 2024-12-01T17:09:37,405 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-01T17:09:37,405 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:09:37,406 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:09:37,406 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:09:37,406 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:09:37,406 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:09:37,406 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:09:37,406 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:09:37,406 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:09:37,406 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:09:37,406 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:09:37,406 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:09:37,406 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=776a1b19012200a1457f3b003039fdac, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45268bd4bd05ba72165dccc5445bee5a, ASSIGN}] 2024-12-01T17:09:37,407 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45268bd4bd05ba72165dccc5445bee5a, ASSIGN 2024-12-01T17:09:37,407 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=776a1b19012200a1457f3b003039fdac, ASSIGN 2024-12-01T17:09:37,408 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=776a1b19012200a1457f3b003039fdac, ASSIGN; state=OFFLINE, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:09:37,408 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45268bd4bd05ba72165dccc5445bee5a, ASSIGN; state=OFFLINE, location=b87883a7882d,44753,1733072737741; forceNewPlan=false, retain=false 2024-12-01T17:09:37,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-01T17:09:37,559 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
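
[editor's note] At this point the balancer has produced an assignment plan for the two new regions. Once they are open, a client can look up which region server hosts a given row, much as the AsyncNonMetaRegionLocator entries further down do for rows '1' and '2'. A hedged sketch using RegionLocator follows; the class and method names around it are illustrative, not taken from the test source.

// Hypothetical sketch: resolve the hosting region and server for rows "1" and "2".
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public final class LocateRowRegions {
  public static void locate(Connection conn) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (RegionLocator locator = conn.getRegionLocator(name)) {
      HRegionLocation loc1 = locator.getRegionLocation(Bytes.toBytes("1"));
      HRegionLocation loc2 = locator.getRegionLocation(Bytes.toBytes("2"));
      // Prints the encoded region name and the server hosting each row.
      System.out.println(loc1.getRegion().getEncodedName() + " @ " + loc1.getServerName());
      System.out.println(loc2.getRegion().getEncodedName() + " @ " + loc2.getServerName());
    }
  }
}
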
2024-12-01T17:09:37,559 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=776a1b19012200a1457f3b003039fdac, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:09:37,559 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=45268bd4bd05ba72165dccc5445bee5a, regionState=OPENING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:09:37,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=776a1b19012200a1457f3b003039fdac, ASSIGN because future has completed 2024-12-01T17:09:37,564 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 776a1b19012200a1457f3b003039fdac, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:09:37,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45268bd4bd05ba72165dccc5445bee5a, ASSIGN because future has completed 2024-12-01T17:09:37,565 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 45268bd4bd05ba72165dccc5445bee5a, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:09:37,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-01T17:09:37,723 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 2024-12-01T17:09:37,723 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 776a1b19012200a1457f3b003039fdac, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac.', STARTKEY => '', ENDKEY => '2'} 2024-12-01T17:09:37,724 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. 2024-12-01T17:09:37,724 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 
service=AccessControlService 2024-12-01T17:09:37,724 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => 45268bd4bd05ba72165dccc5445bee5a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a.', STARTKEY => '2', ENDKEY => ''} 2024-12-01T17:09:37,724 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:09:37,725 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:37,725 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. service=AccessControlService 2024-12-01T17:09:37,725 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:37,725 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:37,725 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
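
[editor's note] The AccessController coprocessor registered above enforces per-table ACLs; later entries show PermissionStorage writing a "jenkins: RWXCA" grant for this table. A grant of that shape could be issued with AccessControlClient; the sketch below is an assumption about how such a grant is made, not code from this test.

// Hypothetical sketch: grant full table-level permissions (RWXCA) to a user.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public final class GrantTablePermissions {
  public static void grantAll(Connection conn, String user) throws Throwable {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    // null family/qualifier => the grant applies to the whole table.
    AccessControlClient.grant(conn, table, user, null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}
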
2024-12-01T17:09:37,725 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:37,725 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:37,725 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:37,726 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for 45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:37,726 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for 45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:37,727 INFO [StoreOpener-776a1b19012200a1457f3b003039fdac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:37,727 INFO [StoreOpener-45268bd4bd05ba72165dccc5445bee5a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:37,729 INFO [StoreOpener-776a1b19012200a1457f3b003039fdac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 776a1b19012200a1457f3b003039fdac columnFamilyName cf 2024-12-01T17:09:37,729 INFO [StoreOpener-45268bd4bd05ba72165dccc5445bee5a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
45268bd4bd05ba72165dccc5445bee5a columnFamilyName cf 2024-12-01T17:09:37,729 DEBUG [StoreOpener-45268bd4bd05ba72165dccc5445bee5a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:37,729 DEBUG [StoreOpener-776a1b19012200a1457f3b003039fdac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:37,730 INFO [StoreOpener-45268bd4bd05ba72165dccc5445bee5a-1 {}] regionserver.HStore(327): Store=45268bd4bd05ba72165dccc5445bee5a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:09:37,730 INFO [StoreOpener-776a1b19012200a1457f3b003039fdac-1 {}] regionserver.HStore(327): Store=776a1b19012200a1457f3b003039fdac/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:09:37,730 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for 45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:37,730 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:37,731 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:37,731 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:37,731 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:37,731 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:37,731 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for 45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:37,731 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for 45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:37,732 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 
776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:37,732 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:37,733 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for 45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:37,733 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:37,735 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:09:37,735 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:09:37,735 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened 45268bd4bd05ba72165dccc5445bee5a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64086511, jitterRate=-0.045036569237709045}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:09:37,735 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:37,735 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 776a1b19012200a1457f3b003039fdac; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61945374, jitterRate=-0.07694199681282043}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:09:37,735 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:37,736 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for 45268bd4bd05ba72165dccc5445bee5a: Running coprocessor pre-open hook at 1733072977726Writing region info on filesystem at 1733072977726Initializing all the Stores at 1733072977727 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072977727Cleaning up temporary data from old regions at 1733072977731 (+4 ms)Running coprocessor post-open hooks at 1733072977735 (+4 ms)Region 
opened successfully at 1733072977736 (+1 ms) 2024-12-01T17:09:37,736 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 776a1b19012200a1457f3b003039fdac: Running coprocessor pre-open hook at 1733072977725Writing region info on filesystem at 1733072977725Initializing all the Stores at 1733072977727 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072977727Cleaning up temporary data from old regions at 1733072977732 (+5 ms)Running coprocessor post-open hooks at 1733072977735 (+3 ms)Region opened successfully at 1733072977736 (+1 ms) 2024-12-01T17:09:37,736 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac., pid=146, masterSystemTime=1733072977716 2024-12-01T17:09:37,736 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a., pid=147, masterSystemTime=1733072977719 2024-12-01T17:09:37,738 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 2024-12-01T17:09:37,738 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 2024-12-01T17:09:37,739 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=776a1b19012200a1457f3b003039fdac, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:09:37,739 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. 2024-12-01T17:09:37,739 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. 
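
[editor's note] Both regions are now open with next sequenceid=2. The flush entries further down (keys "1/cf:" and "2/cf:", 24 B each) imply one small put per region before the merge. A hedged sketch of such writes follows; the values and class name are illustrative only.

// Hypothetical sketch: write one cell into each region (row "1" and row "2").
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class WriteOneRowPerRegion {
  public static void write(Connection conn) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Table table = conn.getTable(name)) {
      byte[] cf = Bytes.toBytes("cf");
      // Row "1" lands in ['', '2'), row "2" in ['2', ''); the qualifier is empty
      // to match the "1/cf:" and "2/cf:" keys seen in the flush entries below.
      table.put(new Put(Bytes.toBytes("1")).addColumn(cf, Bytes.toBytes(""), Bytes.toBytes("v1")));
      table.put(new Put(Bytes.toBytes("2")).addColumn(cf, Bytes.toBytes(""), Bytes.toBytes("v2")));
    }
  }
}
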
2024-12-01T17:09:37,739 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=45268bd4bd05ba72165dccc5445bee5a, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:09:37,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 776a1b19012200a1457f3b003039fdac, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:09:37,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 45268bd4bd05ba72165dccc5445bee5a, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:09:37,743 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=144 2024-12-01T17:09:37,743 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 776a1b19012200a1457f3b003039fdac, server=b87883a7882d,36521,1733072737896 in 177 msec 2024-12-01T17:09:37,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=145 2024-12-01T17:09:37,744 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=776a1b19012200a1457f3b003039fdac, ASSIGN in 337 msec 2024-12-01T17:09:37,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure 45268bd4bd05ba72165dccc5445bee5a, server=b87883a7882d,44753,1733072737741 in 177 msec 2024-12-01T17:09:37,745 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-12-01T17:09:37,745 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45268bd4bd05ba72165dccc5445bee5a, ASSIGN in 338 msec 2024-12-01T17:09:37,746 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:09:37,746 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072977746"}]},"ts":"1733072977746"} 2024-12-01T17:09:37,747 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-01T17:09:37,748 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:09:37,749 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-01T17:09:37,751 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-01T17:09:37,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:37,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:37,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:37,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:37,842 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:37,842 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:37,842 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:37,842 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:37,843 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:37,843 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:37,843 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:37,843 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:37,844 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 475 msec 2024-12-01T17:09:38,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-01T17:09:38,001 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-01T17:09:38,008 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:09:38,014 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:09:38,016 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-01T17:09:38,028 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.3 merge regions [776a1b19012200a1457f3b003039fdac, 45268bd4bd05ba72165dccc5445bee5a] 2024-12-01T17:09:38,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[776a1b19012200a1457f3b003039fdac, 45268bd4bd05ba72165dccc5445bee5a], force=true 2024-12-01T17:09:38,034 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[776a1b19012200a1457f3b003039fdac, 45268bd4bd05ba72165dccc5445bee5a], force=true 2024-12-01T17:09:38,034 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[776a1b19012200a1457f3b003039fdac, 45268bd4bd05ba72165dccc5445bee5a], force=true 2024-12-01T17:09:38,034 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[776a1b19012200a1457f3b003039fdac, 45268bd4bd05ba72165dccc5445bee5a], 
force=true 2024-12-01T17:09:38,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-01T17:09:38,040 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=776a1b19012200a1457f3b003039fdac, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45268bd4bd05ba72165dccc5445bee5a, UNASSIGN}] 2024-12-01T17:09:38,041 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=776a1b19012200a1457f3b003039fdac, UNASSIGN 2024-12-01T17:09:38,041 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45268bd4bd05ba72165dccc5445bee5a, UNASSIGN 2024-12-01T17:09:38,041 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=45268bd4bd05ba72165dccc5445bee5a, regionState=CLOSING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:09:38,041 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=776a1b19012200a1457f3b003039fdac, regionState=CLOSING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:09:38,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45268bd4bd05ba72165dccc5445bee5a, UNASSIGN because future has completed 2024-12-01T17:09:38,043 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-01T17:09:38,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 45268bd4bd05ba72165dccc5445bee5a, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:09:38,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=776a1b19012200a1457f3b003039fdac, UNASSIGN because future has completed 2024-12-01T17:09:38,044 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-01T17:09:38,044 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 776a1b19012200a1457f3b003039fdac, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:09:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done 
pid=148 2024-12-01T17:09:38,195 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close 45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:38,195 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close 776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:38,195 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-01T17:09:38,195 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-01T17:09:38,196 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 45268bd4bd05ba72165dccc5445bee5a, disabling compactions & flushes 2024-12-01T17:09:38,196 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing 776a1b19012200a1457f3b003039fdac, disabling compactions & flushes 2024-12-01T17:09:38,196 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. 2024-12-01T17:09:38,196 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 2024-12-01T17:09:38,196 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. 2024-12-01T17:09:38,196 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 2024-12-01T17:09:38,196 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. after waiting 0 ms 2024-12-01T17:09:38,196 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. after waiting 0 ms 2024-12-01T17:09:38,196 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. 2024-12-01T17:09:38,196 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 
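
[editor's note] The MergeTableRegionsProcedure stored above (pid=148, force=true) is the server-side counterpart of a client merge request; the regions are first unassigned and flushed here before the merged region is written. A hedged sketch of issuing such a request via the Admin API follows; the region lookup and naming are illustrative, not taken from the test source.

// Hypothetical sketch: merge the two regions of the table into one, forcibly.
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;

public final class MergeTableRegions {
  public static void merge(Admin admin) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    List<RegionInfo> regions = admin.getRegions(name);
    byte[][] toMerge = new byte[][] {
        regions.get(0).getEncodedNameAsBytes(),
        regions.get(1).getEncodedNameAsBytes()
    };
    // forcible=true also permits merging regions that are not adjacent;
    // the log above shows the equivalent request arriving with force=true.
    admin.mergeRegionsAsync(toMerge, true).get(); // wait for the merge procedure to finish
  }
}
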
2024-12-01T17:09:38,196 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing 776a1b19012200a1457f3b003039fdac 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-01T17:09:38,196 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing 45268bd4bd05ba72165dccc5445bee5a 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-01T17:09:38,211 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/.tmp/cf/5b7b141a08bd4315ac291de725e69f02 is 28, key is 1/cf:/1733072978010/Put/seqid=0 2024-12-01T17:09:38,211 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/.tmp/cf/4624c34245a4420cb53c6ca660dd94ad is 28, key is 2/cf:/1733072978015/Put/seqid=0 2024-12-01T17:09:38,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742163_1339 (size=4945) 2024-12-01T17:09:38,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742164_1340 (size=4945) 2024-12-01T17:09:38,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742164_1340 (size=4945) 2024-12-01T17:09:38,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742164_1340 (size=4945) 2024-12-01T17:09:38,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742163_1339 (size=4945) 2024-12-01T17:09:38,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742163_1339 (size=4945) 2024-12-01T17:09:38,217 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/.tmp/cf/5b7b141a08bd4315ac291de725e69f02 2024-12-01T17:09:38,217 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/.tmp/cf/4624c34245a4420cb53c6ca660dd94ad 2024-12-01T17:09:38,220 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/.tmp/cf/4624c34245a4420cb53c6ca660dd94ad as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/cf/4624c34245a4420cb53c6ca660dd94ad 2024-12-01T17:09:38,221 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/.tmp/cf/5b7b141a08bd4315ac291de725e69f02 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/cf/5b7b141a08bd4315ac291de725e69f02 2024-12-01T17:09:38,224 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/cf/5b7b141a08bd4315ac291de725e69f02, entries=1, sequenceid=5, filesize=4.8 K 2024-12-01T17:09:38,224 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/cf/4624c34245a4420cb53c6ca660dd94ad, entries=1, sequenceid=5, filesize=4.8 K 2024-12-01T17:09:38,225 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 45268bd4bd05ba72165dccc5445bee5a in 29ms, sequenceid=5, compaction requested=false 2024-12-01T17:09:38,225 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 776a1b19012200a1457f3b003039fdac in 29ms, sequenceid=5, compaction requested=false 2024-12-01T17:09:38,225 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-01T17:09:38,225 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-01T17:09:38,229 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-01T17:09:38,229 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-01T17:09:38,229 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:09:38,229 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:09:38,229 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 2024-12-01T17:09:38,229 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. 2024-12-01T17:09:38,229 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for 776a1b19012200a1457f3b003039fdac: Waiting for close lock at 1733072978196Running coprocessor pre-close hooks at 1733072978196Disabling compacts and flushes for region at 1733072978196Disabling writes for close at 1733072978196Obtaining lock to block concurrent updates at 1733072978196Preparing flush snapshotting stores in 776a1b19012200a1457f3b003039fdac at 1733072978196Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733072978196Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac. 
at 1733072978196Flushing 776a1b19012200a1457f3b003039fdac/cf: creating writer at 1733072978197 (+1 ms)Flushing 776a1b19012200a1457f3b003039fdac/cf: appending metadata at 1733072978210 (+13 ms)Flushing 776a1b19012200a1457f3b003039fdac/cf: closing flushed file at 1733072978210Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7db7719d: reopening flushed file at 1733072978220 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 776a1b19012200a1457f3b003039fdac in 29ms, sequenceid=5, compaction requested=false at 1733072978225 (+5 ms)Writing region close event to WAL at 1733072978226 (+1 ms)Running coprocessor post-close hooks at 1733072978229 (+3 ms)Closed at 1733072978229 2024-12-01T17:09:38,229 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for 45268bd4bd05ba72165dccc5445bee5a: Waiting for close lock at 1733072978195Running coprocessor pre-close hooks at 1733072978195Disabling compacts and flushes for region at 1733072978196 (+1 ms)Disabling writes for close at 1733072978196Obtaining lock to block concurrent updates at 1733072978196Preparing flush snapshotting stores in 45268bd4bd05ba72165dccc5445bee5a at 1733072978196Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733072978196Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a. at 1733072978196Flushing 45268bd4bd05ba72165dccc5445bee5a/cf: creating writer at 1733072978197 (+1 ms)Flushing 45268bd4bd05ba72165dccc5445bee5a/cf: appending metadata at 1733072978210 (+13 ms)Flushing 45268bd4bd05ba72165dccc5445bee5a/cf: closing flushed file at 1733072978210Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@173300f1: reopening flushed file at 1733072978220 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 45268bd4bd05ba72165dccc5445bee5a in 29ms, sequenceid=5, compaction requested=false at 1733072978225 (+5 ms)Writing region close event to WAL at 1733072978226 (+1 ms)Running coprocessor post-close hooks at 1733072978229 (+3 ms)Closed at 1733072978229 2024-12-01T17:09:38,231 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed 45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:38,231 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=45268bd4bd05ba72165dccc5445bee5a, regionState=CLOSED 2024-12-01T17:09:38,231 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed 776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:38,232 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=776a1b19012200a1457f3b003039fdac, regionState=CLOSED 2024-12-01T17:09:38,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 45268bd4bd05ba72165dccc5445bee5a, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:09:38,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to 
wake up procedure pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 776a1b19012200a1457f3b003039fdac, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:09:38,236 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-12-01T17:09:38,236 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure 45268bd4bd05ba72165dccc5445bee5a, server=b87883a7882d,44753,1733072737741 in 192 msec 2024-12-01T17:09:38,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=149 2024-12-01T17:09:38,237 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45268bd4bd05ba72165dccc5445bee5a, UNASSIGN in 196 msec 2024-12-01T17:09:38,238 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 776a1b19012200a1457f3b003039fdac, server=b87883a7882d,36521,1733072737896 in 192 msec 2024-12-01T17:09:38,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=148 2024-12-01T17:09:38,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=776a1b19012200a1457f3b003039fdac, UNASSIGN in 198 msec 2024-12-01T17:09:38,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742165_1341 (size=84) 2024-12-01T17:09:38,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742165_1341 (size=84) 2024-12-01T17:09:38,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742165_1341 (size=84) 2024-12-01T17:09:38,251 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:38,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742166_1342 (size=20) 2024-12-01T17:09:38,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742166_1342 (size=20) 2024-12-01T17:09:38,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742166_1342 (size=20) 2024-12-01T17:09:38,259 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:38,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742167_1343 (size=21) 2024-12-01T17:09:38,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742167_1343 (size=21) 2024-12-01T17:09:38,265 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742167_1343 (size=21) 2024-12-01T17:09:38,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742168_1344 (size=84) 2024-12-01T17:09:38,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742168_1344 (size=84) 2024-12-01T17:09:38,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742168_1344 (size=84) 2024-12-01T17:09:38,271 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:38,278 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-01T17:09:38,280 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977365.776a1b19012200a1457f3b003039fdac.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-01T17:09:38,280 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733072977365.45268bd4bd05ba72165dccc5445bee5a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-01T17:09:38,280 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-01T17:09:38,284 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c5ba4cec0c1a2c8690fe97339265e40, ASSIGN}] 2024-12-01T17:09:38,284 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c5ba4cec0c1a2c8690fe97339265e40, ASSIGN 2024-12-01T17:09:38,285 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c5ba4cec0c1a2c8690fe97339265e40, ASSIGN; state=MERGED, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:09:38,350 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-01T17:09:38,435 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-01T17:09:38,436 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=2c5ba4cec0c1a2c8690fe97339265e40, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:09:38,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c5ba4cec0c1a2c8690fe97339265e40, ASSIGN because future has completed 2024-12-01T17:09:38,439 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2c5ba4cec0c1a2c8690fe97339265e40, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:09:38,601 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40. 2024-12-01T17:09:38,601 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => 2c5ba4cec0c1a2c8690fe97339265e40, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40.', STARTKEY => '', ENDKEY => ''} 2024-12-01T17:09:38,601 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40. service=AccessControlService 2024-12-01T17:09:38,602 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:09:38,602 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,602 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:38,602 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,602 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,605 INFO [StoreOpener-2c5ba4cec0c1a2c8690fe97339265e40-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,607 INFO [StoreOpener-2c5ba4cec0c1a2c8690fe97339265e40-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c5ba4cec0c1a2c8690fe97339265e40 columnFamilyName cf 2024-12-01T17:09:38,607 DEBUG [StoreOpener-2c5ba4cec0c1a2c8690fe97339265e40-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:38,617 DEBUG [StoreOpener-2c5ba4cec0c1a2c8690fe97339265e40-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/cf/4624c34245a4420cb53c6ca660dd94ad.45268bd4bd05ba72165dccc5445bee5a->hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/cf/4624c34245a4420cb53c6ca660dd94ad-top 2024-12-01T17:09:38,622 DEBUG [StoreOpener-2c5ba4cec0c1a2c8690fe97339265e40-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/cf/5b7b141a08bd4315ac291de725e69f02.776a1b19012200a1457f3b003039fdac->hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/cf/5b7b141a08bd4315ac291de725e69f02-top 2024-12-01T17:09:38,623 INFO [StoreOpener-2c5ba4cec0c1a2c8690fe97339265e40-1 {}] regionserver.HStore(327): Store=2c5ba4cec0c1a2c8690fe97339265e40/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:09:38,623 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,623 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,624 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,625 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,625 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,627 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,627 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened 2c5ba4cec0c1a2c8690fe97339265e40; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67900767, jitterRate=0.01180027425289154}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:09:38,627 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,628 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for 2c5ba4cec0c1a2c8690fe97339265e40: Running coprocessor pre-open hook at 1733072978603Writing region info on filesystem at 1733072978603Initializing all the Stores at 1733072978604 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072978604Cleaning up temporary data from old regions at 1733072978625 (+21 ms)Running coprocessor post-open hooks at 1733072978627 (+2 ms)Region opened successfully at 1733072978628 (+1 ms) 2024-12-01T17:09:38,628 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40., pid=154, masterSystemTime=1733072978593 2024-12-01T17:09:38,628 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40.,because compaction is disabled. 2024-12-01T17:09:38,630 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40. 2024-12-01T17:09:38,630 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40. 2024-12-01T17:09:38,631 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=2c5ba4cec0c1a2c8690fe97339265e40, regionState=OPEN, openSeqNum=9, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:09:38,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2c5ba4cec0c1a2c8690fe97339265e40, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:09:38,634 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-01T17:09:38,634 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure 2c5ba4cec0c1a2c8690fe97339265e40, server=b87883a7882d,36521,1733072737896 in 194 msec 2024-12-01T17:09:38,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-12-01T17:09:38,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c5ba4cec0c1a2c8690fe97339265e40, ASSIGN in 350 msec 2024-12-01T17:09:38,638 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[776a1b19012200a1457f3b003039fdac, 45268bd4bd05ba72165dccc5445bee5a], force=true in 606 msec 2024-12-01T17:09:38,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-01T17:09:38,660 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-01T17:09:38,661 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-01T17:09:38,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072978661 (current time:1733072978661). 2024-12-01T17:09:38,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:09:38,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-01T17:09:38,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:09:38,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c74c604, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:38,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:09:38,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:09:38,662 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:09:38,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:09:38,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:09:38,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@404920b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:38,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:09:38,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:09:38,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:38,663 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58652, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:09:38,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1649f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:38,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:09:38,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:09:38,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:38,665 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50776, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:09:38,666 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:09:38,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:09:38,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:38,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:38,666 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:09:38,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44dae3a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:38,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:09:38,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:09:38,668 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:09:38,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:09:38,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:09:38,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1af7ad2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:38,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:09:38,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:09:38,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:38,669 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58668, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:09:38,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fecdd83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:38,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:09:38,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:09:38,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:38,672 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50784, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-01T17:09:38,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:09:38,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:38,675 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38356, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:09:38,676 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:09:38,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:09:38,676 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:38,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:38,676 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:09:38,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-01T17:09:38,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:09:38,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-01T17:09:38,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-01T17:09:38,679 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:09:38,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-01T17:09:38,680 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:09:38,683 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:09:38,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742169_1345 (size=216) 2024-12-01T17:09:38,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742169_1345 (size=216) 2024-12-01T17:09:38,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742169_1345 (size=216) 2024-12-01T17:09:38,690 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:09:38,690 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c5ba4cec0c1a2c8690fe97339265e40}] 2024-12-01T17:09:38,692 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-01T17:09:38,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-12-01T17:09:38,843 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40. 2024-12-01T17:09:38,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for 2c5ba4cec0c1a2c8690fe97339265e40: 2024-12-01T17:09:38,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-01T17:09:38,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:38,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:09:38,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/cf/4624c34245a4420cb53c6ca660dd94ad.45268bd4bd05ba72165dccc5445bee5a->hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/cf/4624c34245a4420cb53c6ca660dd94ad-top, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/cf/5b7b141a08bd4315ac291de725e69f02.776a1b19012200a1457f3b003039fdac->hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/cf/5b7b141a08bd4315ac291de725e69f02-top] hfiles 2024-12-01T17:09:38,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/cf/4624c34245a4420cb53c6ca660dd94ad.45268bd4bd05ba72165dccc5445bee5a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:38,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/cf/5b7b141a08bd4315ac291de725e69f02.776a1b19012200a1457f3b003039fdac for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:38,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742170_1346 (size=269) 2024-12-01T17:09:38,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742170_1346 (size=269) 2024-12-01T17:09:38,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40. 
2024-12-01T17:09:38,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742170_1346 (size=269) 2024-12-01T17:09:38,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-01T17:09:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-12-01T17:09:38,856 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,856 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:38,858 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-12-01T17:09:38,858 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2c5ba4cec0c1a2c8690fe97339265e40 in 167 msec 2024-12-01T17:09:38,858 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:09:38,859 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:09:38,860 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:09:38,860 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:38,860 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:38,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742171_1347 (size=670) 2024-12-01T17:09:38,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742171_1347 (size=670) 2024-12-01T17:09:38,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742171_1347 (size=670) 2024-12-01T17:09:38,873 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:09:38,878 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:09:38,878 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:38,880 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:09:38,880 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-01T17:09:38,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 203 msec 2024-12-01T17:09:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-01T17:09:39,000 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-01T17:09:39,001 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072979001 2024-12-01T17:09:39,001 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44451, tgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072979001, rawTgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072979001, srcFsUri=hdfs://localhost:44451, srcDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:39,036 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44451, inputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:39,036 DEBUG [Time-limited 
test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072979001, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072979001/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:39,038 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-01T17:09:39,042 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072979001/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:39,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742172_1348 (size=216) 2024-12-01T17:09:39,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742172_1348 (size=216) 2024-12-01T17:09:39,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742172_1348 (size=216) 2024-12-01T17:09:39,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742173_1349 (size=670) 2024-12-01T17:09:39,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742173_1349 (size=670) 2024-12-01T17:09:39,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742173_1349 (size=670) 2024-12-01T17:09:39,069 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:39,069 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:39,069 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:39,614 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0006_000001 (auth:SIMPLE) from 127.0.0.1:46494 2024-12-01T17:09:39,626 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0006/container_1733072744857_0006_01_000001/launch_container.sh] 2024-12-01T17:09:39,626 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0006/container_1733072744857_0006_01_000001/container_tokens] 2024-12-01T17:09:39,626 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0006/container_1733072744857_0006_01_000001/sysfs] 2024-12-01T17:09:40,037 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-13808363947541111409.jar 2024-12-01T17:09:40,037 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:40,038 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:40,063 WARN [regionserver/b87883a7882d:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 2, running: 1 2024-12-01T17:09:40,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-13358670813465054977.jar 2024-12-01T17:09:40,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:40,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:40,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:40,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:40,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:40,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:09:40,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-01T17:09:40,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-01T17:09:40,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-01T17:09:40,122 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-01T17:09:40,122 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-01T17:09:40,122 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-01T17:09:40,123 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-01T17:09:40,123 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-01T17:09:40,123 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-01T17:09:40,124 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-01T17:09:40,124 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-01T17:09:40,124 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:09:40,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:09:40,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:09:40,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:09:40,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:09:40,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:09:40,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:09:40,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742174_1350 (size=24020) 2024-12-01T17:09:40,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742174_1350 (size=24020) 2024-12-01T17:09:40,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742174_1350 (size=24020) 2024-12-01T17:09:40,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742175_1351 (size=77755) 2024-12-01T17:09:40,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36379 is added to blk_1073742175_1351 (size=77755) 2024-12-01T17:09:40,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742175_1351 (size=77755) 2024-12-01T17:09:40,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742176_1352 (size=131360) 2024-12-01T17:09:40,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742176_1352 (size=131360) 2024-12-01T17:09:40,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742176_1352 (size=131360) 2024-12-01T17:09:40,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742177_1353 (size=111793) 2024-12-01T17:09:40,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742177_1353 (size=111793) 2024-12-01T17:09:40,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742177_1353 (size=111793) 2024-12-01T17:09:40,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742178_1354 (size=1832290) 2024-12-01T17:09:40,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742178_1354 (size=1832290) 2024-12-01T17:09:40,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742178_1354 (size=1832290) 2024-12-01T17:09:40,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742179_1355 (size=8360005) 2024-12-01T17:09:40,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742179_1355 (size=8360005) 2024-12-01T17:09:40,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742179_1355 (size=8360005) 2024-12-01T17:09:40,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742180_1356 (size=503880) 2024-12-01T17:09:40,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742180_1356 (size=503880) 2024-12-01T17:09:40,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742180_1356 (size=503880) 2024-12-01T17:09:40,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742181_1357 (size=322274) 2024-12-01T17:09:40,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742181_1357 (size=322274) 2024-12-01T17:09:40,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742181_1357 (size=322274) 2024-12-01T17:09:40,763 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742182_1358 (size=20406) 2024-12-01T17:09:40,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742182_1358 (size=20406) 2024-12-01T17:09:40,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742182_1358 (size=20406) 2024-12-01T17:09:40,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742183_1359 (size=45609) 2024-12-01T17:09:40,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742183_1359 (size=45609) 2024-12-01T17:09:40,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742183_1359 (size=45609) 2024-12-01T17:09:40,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742184_1360 (size=136454) 2024-12-01T17:09:40,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742184_1360 (size=136454) 2024-12-01T17:09:40,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742184_1360 (size=136454) 2024-12-01T17:09:41,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742185_1361 (size=1597136) 2024-12-01T17:09:41,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742185_1361 (size=1597136) 2024-12-01T17:09:41,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742185_1361 (size=1597136) 2024-12-01T17:09:41,090 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 9c76487e5a72f5c3f3d5290677627bc3 changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:09:41,090 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 3b1abb99d0c92014cf2359f8bca13e19 changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:09:41,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742186_1362 (size=440956) 2024-12-01T17:09:41,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742186_1362 (size=440956) 2024-12-01T17:09:41,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742186_1362 (size=440956) 2024-12-01T17:09:41,153 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:09:41,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742187_1363 (size=30873) 2024-12-01T17:09:41,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742187_1363 
(size=30873) 2024-12-01T17:09:41,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742187_1363 (size=30873) 2024-12-01T17:09:41,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742188_1364 (size=29229) 2024-12-01T17:09:41,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742188_1364 (size=29229) 2024-12-01T17:09:41,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742188_1364 (size=29229) 2024-12-01T17:09:41,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742189_1365 (size=903864) 2024-12-01T17:09:41,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742189_1365 (size=903864) 2024-12-01T17:09:41,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742189_1365 (size=903864) 2024-12-01T17:09:41,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742190_1366 (size=5175431) 2024-12-01T17:09:41,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742190_1366 (size=5175431) 2024-12-01T17:09:41,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742190_1366 (size=5175431) 2024-12-01T17:09:41,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742191_1367 (size=232881) 2024-12-01T17:09:41,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742191_1367 (size=232881) 2024-12-01T17:09:41,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742191_1367 (size=232881) 2024-12-01T17:09:41,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742192_1368 (size=1323991) 2024-12-01T17:09:41,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742192_1368 (size=1323991) 2024-12-01T17:09:41,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742192_1368 (size=1323991) 2024-12-01T17:09:41,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742193_1369 (size=4695811) 2024-12-01T17:09:41,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742193_1369 (size=4695811) 2024-12-01T17:09:41,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742193_1369 (size=4695811) 2024-12-01T17:09:41,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to 
blk_1073742194_1370 (size=1877034) 2024-12-01T17:09:41,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742194_1370 (size=1877034) 2024-12-01T17:09:41,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742194_1370 (size=1877034) 2024-12-01T17:09:42,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742195_1371 (size=6424742) 2024-12-01T17:09:42,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742195_1371 (size=6424742) 2024-12-01T17:09:42,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742195_1371 (size=6424742) 2024-12-01T17:09:42,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742196_1372 (size=217555) 2024-12-01T17:09:42,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742196_1372 (size=217555) 2024-12-01T17:09:42,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742196_1372 (size=217555) 2024-12-01T17:09:42,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742197_1373 (size=4188619) 2024-12-01T17:09:42,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742197_1373 (size=4188619) 2024-12-01T17:09:42,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742197_1373 (size=4188619) 2024-12-01T17:09:42,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742198_1374 (size=127628) 2024-12-01T17:09:42,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742198_1374 (size=127628) 2024-12-01T17:09:42,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742198_1374 (size=127628) 2024-12-01T17:09:42,333 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-01T17:09:42,346 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-01T17:09:42,349 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-01T17:09:42,349 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-01T17:09:42,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742199_1375 (size=481) 2024-12-01T17:09:42,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742199_1375 (size=481) 2024-12-01T17:09:42,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742199_1375 (size=481) 2024-12-01T17:09:42,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742200_1376 (size=21) 2024-12-01T17:09:42,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742200_1376 (size=21) 2024-12-01T17:09:42,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742200_1376 (size=21) 2024-12-01T17:09:42,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742201_1377 (size=304142) 2024-12-01T17:09:42,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742201_1377 (size=304142) 2024-12-01T17:09:42,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742201_1377 (size=304142) 2024-12-01T17:09:42,612 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-01T17:09:42,612 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-01T17:09:42,631 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0007_000001 (auth:SIMPLE) from 127.0.0.1:46482 2024-12-01T17:09:47,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:47,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-01T17:09:49,542 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0007_000001 (auth:SIMPLE) from 127.0.0.1:46994 2024-12-01T17:09:49,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742202_1378 (size=349840) 2024-12-01T17:09:49,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742202_1378 (size=349840) 2024-12-01T17:09:49,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742202_1378 (size=349840) 2024-12-01T17:09:51,734 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0007_000001 (auth:SIMPLE) from 127.0.0.1:44502 2024-12-01T17:09:51,734 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0007_000001 (auth:SIMPLE) from 127.0.0.1:46494 2024-12-01T17:09:55,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742203_1379 (size=4945) 2024-12-01T17:09:55,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742203_1379 (size=4945) 2024-12-01T17:09:55,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742203_1379 (size=4945) 2024-12-01T17:09:55,214 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0007/container_1733072744857_0007_01_000002/launch_container.sh] 2024-12-01T17:09:55,214 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0007/container_1733072744857_0007_01_000002/container_tokens] 2024-12-01T17:09:55,214 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0007/container_1733072744857_0007_01_000002/sysfs] 2024-12-01T17:09:55,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742205_1381 (size=4945) 2024-12-01T17:09:55,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742205_1381 (size=4945) 2024-12-01T17:09:55,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742205_1381 (size=4945) 2024-12-01T17:09:55,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742204_1380 (size=22243) 2024-12-01T17:09:55,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742204_1380 (size=22243) 2024-12-01T17:09:55,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742204_1380 (size=22243) 2024-12-01T17:09:55,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742206_1382 (size=482) 2024-12-01T17:09:55,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742206_1382 (size=482) 2024-12-01T17:09:55,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742206_1382 (size=482) 2024-12-01T17:09:55,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742207_1383 (size=22243) 2024-12-01T17:09:55,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742207_1383 (size=22243) 2024-12-01T17:09:55,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742207_1383 (size=22243) 2024-12-01T17:09:55,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742208_1384 (size=349840) 2024-12-01T17:09:55,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742208_1384 (size=349840) 2024-12-01T17:09:55,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742208_1384 (size=349840) 2024-12-01T17:09:55,751 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0007_000001 (auth:SIMPLE) from 127.0.0.1:57748 2024-12-01T17:09:55,770 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_0/usercache/jenkins/appcache/application_1733072744857_0007/container_1733072744857_0007_01_000003/launch_container.sh] 
2024-12-01T17:09:55,771 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_0/usercache/jenkins/appcache/application_1733072744857_0007/container_1733072744857_0007_01_000003/container_tokens] 2024-12-01T17:09:55,771 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_0/usercache/jenkins/appcache/application_1733072744857_0007/container_1733072744857_0007_01_000003/sysfs] 2024-12-01T17:09:57,742 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-01T17:09:57,744 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-01T17:09:57,752 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:57,752 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-01T17:09:57,752 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-01T17:09:57,752 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:57,753 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-01T17:09:57,753 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-01T17:09:57,753 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072979001/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072979001/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:57,753 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072979001/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-01T17:09:57,753 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733072979001/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-01T17:09:57,760 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:57,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-01T17:09:57,765 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072997764"}]},"ts":"1733072997764"} 2024-12-01T17:09:57,766 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-01T17:09:57,766 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-01T17:09:57,767 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-01T17:09:57,769 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c5ba4cec0c1a2c8690fe97339265e40, UNASSIGN}] 2024-12-01T17:09:57,770 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c5ba4cec0c1a2c8690fe97339265e40, UNASSIGN 2024-12-01T17:09:57,771 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=2c5ba4cec0c1a2c8690fe97339265e40, regionState=CLOSING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:09:57,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c5ba4cec0c1a2c8690fe97339265e40, UNASSIGN because future has completed 2024-12-01T17:09:57,773 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:09:57,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2c5ba4cec0c1a2c8690fe97339265e40, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:09:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 
2024-12-01T17:09:57,925 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:57,926 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:09:57,926 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing 2c5ba4cec0c1a2c8690fe97339265e40, disabling compactions & flushes 2024-12-01T17:09:57,926 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40. 2024-12-01T17:09:57,926 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40. 2024-12-01T17:09:57,926 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40. after waiting 0 ms 2024-12-01T17:09:57,926 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40. 2024-12-01T17:09:57,931 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-01T17:09:57,931 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:09:57,932 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40. 
2024-12-01T17:09:57,932 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for 2c5ba4cec0c1a2c8690fe97339265e40: Waiting for close lock at 1733072997926Running coprocessor pre-close hooks at 1733072997926Disabling compacts and flushes for region at 1733072997926Disabling writes for close at 1733072997926Writing region close event to WAL at 1733072997927 (+1 ms)Running coprocessor post-close hooks at 1733072997931 (+4 ms)Closed at 1733072997932 (+1 ms) 2024-12-01T17:09:57,934 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed 2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:57,935 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=2c5ba4cec0c1a2c8690fe97339265e40, regionState=CLOSED 2024-12-01T17:09:57,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2c5ba4cec0c1a2c8690fe97339265e40, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:09:57,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-12-01T17:09:57,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure 2c5ba4cec0c1a2c8690fe97339265e40, server=b87883a7882d,36521,1733072737896 in 165 msec 2024-12-01T17:09:57,946 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-01T17:09:57,946 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c5ba4cec0c1a2c8690fe97339265e40, UNASSIGN in 172 msec 2024-12-01T17:09:57,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-12-01T17:09:57,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 179 msec 2024-12-01T17:09:57,950 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072997950"}]},"ts":"1733072997950"} 2024-12-01T17:09:57,952 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-01T17:09:57,952 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-01T17:09:57,953 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 192 msec 2024-12-01T17:09:58,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-01T17:09:58,080 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-01T17:09:58,081 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,083 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,084 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,086 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,087 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:58,087 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:58,087 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:58,088 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/recovered.edits] 2024-12-01T17:09:58,088 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/recovered.edits] 2024-12-01T17:09:58,088 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/recovered.edits] 2024-12-01T17:09:58,091 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/cf/4624c34245a4420cb53c6ca660dd94ad to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/cf/4624c34245a4420cb53c6ca660dd94ad 2024-12-01T17:09:58,091 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/cf/4624c34245a4420cb53c6ca660dd94ad.45268bd4bd05ba72165dccc5445bee5a to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/cf/4624c34245a4420cb53c6ca660dd94ad.45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:58,091 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/cf/5b7b141a08bd4315ac291de725e69f02 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/cf/5b7b141a08bd4315ac291de725e69f02 2024-12-01T17:09:58,092 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/cf/5b7b141a08bd4315ac291de725e69f02.776a1b19012200a1457f3b003039fdac to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/cf/5b7b141a08bd4315ac291de725e69f02.776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:58,093 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/recovered.edits/8.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a/recovered.edits/8.seqid 2024-12-01T17:09:58,094 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45268bd4bd05ba72165dccc5445bee5a 2024-12-01T17:09:58,094 DEBUG [HFileArchiver-17 
{}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/recovered.edits/8.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac/recovered.edits/8.seqid 2024-12-01T17:09:58,095 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/recovered.edits/12.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40/recovered.edits/12.seqid 2024-12-01T17:09:58,095 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c5ba4cec0c1a2c8690fe97339265e40 2024-12-01T17:09:58,096 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/776a1b19012200a1457f3b003039fdac 2024-12-01T17:09:58,096 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-01T17:09:58,098 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,101 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-01T17:09:58,104 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-01T17:09:58,105 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,105 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 
2024-12-01T17:09:58,105 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072998105"}]},"ts":"9223372036854775807"} 2024-12-01T17:09:58,107 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-01T17:09:58,107 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 2c5ba4cec0c1a2c8690fe97339265e40, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40.', STARTKEY => '', ENDKEY => ''}] 2024-12-01T17:09:58,107 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-01T17:09:58,107 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733072998107"}]},"ts":"9223372036854775807"} 2024-12-01T17:09:58,109 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-01T17:09:58,109 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 28 msec 2024-12-01T17:09:58,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,149 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-01T17:09:58,149 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-01T17:09:58,149 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 
2024-12-01T17:09:58,149 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-01T17:09:58,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:58,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:58,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:58,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:58,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-01T17:09:58,160 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:58,160 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,160 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-01T17:09:58,161 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,161 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:58,161 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:58,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-01T17:09:58,163 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072998163"}]},"ts":"1733072998163"} 2024-12-01T17:09:58,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:58,165 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-01T17:09:58,165 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-01T17:09:58,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-01T17:09:58,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=3b1abb99d0c92014cf2359f8bca13e19, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c76487e5a72f5c3f3d5290677627bc3, UNASSIGN}] 2024-12-01T17:09:58,167 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=3b1abb99d0c92014cf2359f8bca13e19, UNASSIGN 2024-12-01T17:09:58,167 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c76487e5a72f5c3f3d5290677627bc3, UNASSIGN 2024-12-01T17:09:58,168 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta 
row=3b1abb99d0c92014cf2359f8bca13e19, regionState=CLOSING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:09:58,168 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=9c76487e5a72f5c3f3d5290677627bc3, regionState=CLOSING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:09:58,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=3b1abb99d0c92014cf2359f8bca13e19, UNASSIGN because future has completed 2024-12-01T17:09:58,169 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:09:58,169 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:09:58,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c76487e5a72f5c3f3d5290677627bc3, UNASSIGN because future has completed 2024-12-01T17:09:58,170 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:09:58,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:09:58,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-01T17:09:58,321 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:58,321 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:09:58,322 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing 3b1abb99d0c92014cf2359f8bca13e19, disabling compactions & flushes 2024-12-01T17:09:58,322 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 2024-12-01T17:09:58,322 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 2024-12-01T17:09:58,322 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 
after waiting 0 ms 2024-12-01T17:09:58,322 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 2024-12-01T17:09:58,322 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:58,322 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:09:58,322 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing 9c76487e5a72f5c3f3d5290677627bc3, disabling compactions & flushes 2024-12-01T17:09:58,322 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:58,322 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:58,322 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. after waiting 0 ms 2024-12-01T17:09:58,322 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:58,326 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:09:58,326 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:09:58,326 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:09:58,327 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19. 
2024-12-01T17:09:58,327 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:09:58,327 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for 3b1abb99d0c92014cf2359f8bca13e19: Waiting for close lock at 1733072998322Running coprocessor pre-close hooks at 1733072998322Disabling compacts and flushes for region at 1733072998322Disabling writes for close at 1733072998322Writing region close event to WAL at 1733072998323 (+1 ms)Running coprocessor post-close hooks at 1733072998326 (+3 ms)Closed at 1733072998327 (+1 ms) 2024-12-01T17:09:58,327 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3. 2024-12-01T17:09:58,327 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for 9c76487e5a72f5c3f3d5290677627bc3: Waiting for close lock at 1733072998322Running coprocessor pre-close hooks at 1733072998322Disabling compacts and flushes for region at 1733072998322Disabling writes for close at 1733072998322Writing region close event to WAL at 1733072998323 (+1 ms)Running coprocessor post-close hooks at 1733072998327 (+4 ms)Closed at 1733072998327 2024-12-01T17:09:58,328 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed 3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:58,329 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=3b1abb99d0c92014cf2359f8bca13e19, regionState=CLOSED 2024-12-01T17:09:58,329 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed 9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:58,330 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=9c76487e5a72f5c3f3d5290677627bc3, regionState=CLOSED 2024-12-01T17:09:58,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:09:58,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:09:58,333 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=164 2024-12-01T17:09:58,333 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure 3b1abb99d0c92014cf2359f8bca13e19, server=b87883a7882d,44753,1733072737741 in 162 msec 2024-12-01T17:09:58,334 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, 
region=3b1abb99d0c92014cf2359f8bca13e19, UNASSIGN in 166 msec 2024-12-01T17:09:58,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=165 2024-12-01T17:09:58,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure 9c76487e5a72f5c3f3d5290677627bc3, server=b87883a7882d,41375,1733072737967 in 162 msec 2024-12-01T17:09:58,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=165, resume processing ppid=163 2024-12-01T17:09:58,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c76487e5a72f5c3f3d5290677627bc3, UNASSIGN in 167 msec 2024-12-01T17:09:58,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-01T17:09:58,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 170 msec 2024-12-01T17:09:58,338 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072998338"}]},"ts":"1733072998338"} 2024-12-01T17:09:58,339 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-01T17:09:58,340 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-01T17:09:58,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 180 msec 2024-12-01T17:09:58,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-01T17:09:58,480 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-01T17:09:58,481 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,483 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,483 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; 
DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,485 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,487 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:58,487 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:58,489 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/recovered.edits] 2024-12-01T17:09:58,489 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/recovered.edits] 2024-12-01T17:09:58,492 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/cf/fd2339b42bd641e8a82f56852d290a0a to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/cf/fd2339b42bd641e8a82f56852d290a0a 2024-12-01T17:09:58,492 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/cf/c68493ac04634af8910f790f99c6e96a to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/cf/c68493ac04634af8910f790f99c6e96a 2024-12-01T17:09:58,494 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19/recovered.edits/9.seqid 2024-12-01T17:09:58,494 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3/recovered.edits/9.seqid 2024-12-01T17:09:58,495 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/3b1abb99d0c92014cf2359f8bca13e19 2024-12-01T17:09:58,495 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c76487e5a72f5c3f3d5290677627bc3 2024-12-01T17:09:58,495 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-01T17:09:58,496 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,498 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-01T17:09:58,500 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-01T17:09:58,502 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,502 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-01T17:09:58,502 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072998502"}]},"ts":"9223372036854775807"} 2024-12-01T17:09:58,502 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733072998502"}]},"ts":"9223372036854775807"} 2024-12-01T17:09:58,504 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-01T17:09:58,504 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 3b1abb99d0c92014cf2359f8bca13e19, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733072975977.3b1abb99d0c92014cf2359f8bca13e19.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 9c76487e5a72f5c3f3d5290677627bc3, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733072975977.9c76487e5a72f5c3f3d5290677627bc3.', STARTKEY => '1', ENDKEY => ''}] 2024-12-01T17:09:58,504 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
2024-12-01T17:09:58,505 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733072998504"}]},"ts":"9223372036854775807"} 2024-12-01T17:09:58,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,507 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-01T17:09:58,507 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-01T17:09:58,507 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-01T17:09:58,507 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-01T17:09:58,507 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-01T17:09:58,508 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,509 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 27 msec 2024-12-01T17:09:58,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:58,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:58,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:58,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:58,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-01T17:09:58,518 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,518 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-01T17:09:58,526 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-01T17:09:58,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,529 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-01T17:09:58,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-01T17:09:58,532 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-01T17:09:58,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:09:58,553 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=819 (was 808) Potentially 
hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:42652 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396781562) connection to localhost/127.0.0.1:39029 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-990436198_1 at /127.0.0.1:42644 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39029 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 141069) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5868 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-990436198_1 at /127.0.0.1:45770 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:34642 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:45796 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=825 (was 809) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=690 (was 748), ProcessCount=19 (was 19), AvailableMemoryMB=3062 (was 3279) 2024-12-01T17:09:58,553 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500 2024-12-01T17:09:58,573 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=819, OpenFileDescriptor=825, MaxFileDescriptor=1048576, SystemLoadAverage=690, ProcessCount=19, AvailableMemoryMB=3061 2024-12-01T17:09:58,573 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500 2024-12-01T17:09:58,575 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:09:58,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-01T17:09:58,577 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:09:58,578 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-12-01T17:09:58,578 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:58,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-01T17:09:58,579 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:09:58,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to 
blk_1073742209_1385 (size=407) 2024-12-01T17:09:58,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742209_1385 (size=407) 2024-12-01T17:09:58,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742209_1385 (size=407) 2024-12-01T17:09:58,597 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3e8ea0e5b158654ef2cc1fdfe08d5ab3, NAME => 'testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:58,598 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => d420cd7efc5fb2675a2a2e3898975c57, NAME => 'testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:58,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742210_1386 (size=68) 2024-12-01T17:09:58,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742210_1386 (size=68) 2024-12-01T17:09:58,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742210_1386 (size=68) 2024-12-01T17:09:58,624 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:58,624 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 3e8ea0e5b158654ef2cc1fdfe08d5ab3, disabling compactions & flushes 2024-12-01T17:09:58,624 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 
2024-12-01T17:09:58,624 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:09:58,624 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. after waiting 0 ms 2024-12-01T17:09:58,624 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:09:58,624 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:09:58,624 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3e8ea0e5b158654ef2cc1fdfe08d5ab3: Waiting for close lock at 1733072998624Disabling compacts and flushes for region at 1733072998624Disabling writes for close at 1733072998624Writing region close event to WAL at 1733072998624Closed at 1733072998624 2024-12-01T17:09:58,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742211_1387 (size=68) 2024-12-01T17:09:58,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742211_1387 (size=68) 2024-12-01T17:09:58,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742211_1387 (size=68) 2024-12-01T17:09:58,630 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:58,630 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing d420cd7efc5fb2675a2a2e3898975c57, disabling compactions & flushes 2024-12-01T17:09:58,631 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 2024-12-01T17:09:58,631 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 2024-12-01T17:09:58,631 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. after waiting 0 ms 2024-12-01T17:09:58,631 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 2024-12-01T17:09:58,631 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 
2024-12-01T17:09:58,631 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for d420cd7efc5fb2675a2a2e3898975c57: Waiting for close lock at 1733072998630Disabling compacts and flushes for region at 1733072998630Disabling writes for close at 1733072998631 (+1 ms)Writing region close event to WAL at 1733072998631Closed at 1733072998631 2024-12-01T17:09:58,632 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:09:58,632 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733072998632"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072998632"}]},"ts":"1733072998632"} 2024-12-01T17:09:58,632 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733072998632"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072998632"}]},"ts":"1733072998632"} 2024-12-01T17:09:58,635 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-01T17:09:58,636 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:09:58,636 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072998636"}]},"ts":"1733072998636"} 2024-12-01T17:09:58,638 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-01T17:09:58,638 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:09:58,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:09:58,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:09:58,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:09:58,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:09:58,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:09:58,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:09:58,639 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:09:58,639 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:09:58,639 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:09:58,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:09:58,639 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3e8ea0e5b158654ef2cc1fdfe08d5ab3, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d420cd7efc5fb2675a2a2e3898975c57, ASSIGN}] 2024-12-01T17:09:58,641 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3e8ea0e5b158654ef2cc1fdfe08d5ab3, ASSIGN 2024-12-01T17:09:58,641 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d420cd7efc5fb2675a2a2e3898975c57, ASSIGN 2024-12-01T17:09:58,642 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3e8ea0e5b158654ef2cc1fdfe08d5ab3, ASSIGN; state=OFFLINE, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:09:58,642 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d420cd7efc5fb2675a2a2e3898975c57, ASSIGN; state=OFFLINE, location=b87883a7882d,44753,1733072737741; forceNewPlan=false, retain=false 2024-12-01T17:09:58,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-01T17:09:58,793 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-01T17:09:58,793 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=d420cd7efc5fb2675a2a2e3898975c57, regionState=OPENING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:09:58,793 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=3e8ea0e5b158654ef2cc1fdfe08d5ab3, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:09:58,795 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3e8ea0e5b158654ef2cc1fdfe08d5ab3, ASSIGN because future has completed 2024-12-01T17:09:58,796 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:09:58,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d420cd7efc5fb2675a2a2e3898975c57, ASSIGN because future has completed 2024-12-01T17:09:58,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure d420cd7efc5fb2675a2a2e3898975c57, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:09:58,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-01T17:09:58,951 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:09:58,951 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 2024-12-01T17:09:58,951 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => 3e8ea0e5b158654ef2cc1fdfe08d5ab3, NAME => 'testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:09:58,951 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => d420cd7efc5fb2675a2a2e3898975c57, NAME => 'testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T17:09:58,952 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. service=AccessControlService 2024-12-01T17:09:58,952 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 
service=AccessControlService 2024-12-01T17:09:58,952 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:09:58,952 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:09:58,952 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:58,952 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:58,952 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:58,952 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:58,952 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:58,952 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:58,952 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:58,952 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:58,954 INFO [StoreOpener-d420cd7efc5fb2675a2a2e3898975c57-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:58,955 INFO [StoreOpener-d420cd7efc5fb2675a2a2e3898975c57-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d420cd7efc5fb2675a2a2e3898975c57 columnFamilyName cf 2024-12-01T17:09:58,955 DEBUG [StoreOpener-d420cd7efc5fb2675a2a2e3898975c57-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:58,956 INFO [StoreOpener-d420cd7efc5fb2675a2a2e3898975c57-1 {}] regionserver.HStore(327): Store=d420cd7efc5fb2675a2a2e3898975c57/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:09:58,956 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:58,956 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:58,957 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:58,957 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:58,957 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:58,958 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:58,959 INFO [StoreOpener-3e8ea0e5b158654ef2cc1fdfe08d5ab3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:58,964 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:09:58,965 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened d420cd7efc5fb2675a2a2e3898975c57; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67508211, jitterRate=0.00595073401927948}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:09:58,965 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 
{event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:58,966 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for d420cd7efc5fb2675a2a2e3898975c57: Running coprocessor pre-open hook at 1733072998952Writing region info on filesystem at 1733072998952Initializing all the Stores at 1733072998953 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072998953Cleaning up temporary data from old regions at 1733072998957 (+4 ms)Running coprocessor post-open hooks at 1733072998965 (+8 ms)Region opened successfully at 1733072998966 (+1 ms) 2024-12-01T17:09:58,967 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57., pid=173, masterSystemTime=1733072998949 2024-12-01T17:09:58,969 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 2024-12-01T17:09:58,970 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 
2024-12-01T17:09:58,970 INFO [StoreOpener-3e8ea0e5b158654ef2cc1fdfe08d5ab3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e8ea0e5b158654ef2cc1fdfe08d5ab3 columnFamilyName cf 2024-12-01T17:09:58,970 DEBUG [StoreOpener-3e8ea0e5b158654ef2cc1fdfe08d5ab3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:58,970 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=d420cd7efc5fb2675a2a2e3898975c57, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:09:58,971 INFO [StoreOpener-3e8ea0e5b158654ef2cc1fdfe08d5ab3-1 {}] regionserver.HStore(327): Store=3e8ea0e5b158654ef2cc1fdfe08d5ab3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:09:58,972 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:58,973 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38737 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=b87883a7882d,44753,1733072737741, table=testtb-testExportExpiredSnapshot, region=d420cd7efc5fb2675a2a2e3898975c57. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
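[Illustrative sketch, not part of the log] The RegionStateStore entries above record the pid=170/171 transitions being written to hbase:meta as the two regions move through OPENING and OPEN on their assigned region servers. A hedged client-side way to confirm where the regions landed is the standard RegionLocator API (this code is not taken from the test):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      // Each location pairs a RegionInfo (encoded name, start/end key) with the
      // server currently hosting it, mirroring the OPEN rows written to hbase:meta.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.printf("%s [%s..%s) on %s%n",
            loc.getRegion().getEncodedName(),
            Bytes.toStringBinary(loc.getRegion().getStartKey()),
            Bytes.toStringBinary(loc.getRegion().getEndKey()),
            loc.getServerName());
      }
    }
  }
}
```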
2024-12-01T17:09:58,973 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:58,973 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:58,974 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure d420cd7efc5fb2675a2a2e3898975c57, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:09:58,974 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:58,974 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:58,978 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:58,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=171 2024-12-01T17:09:58,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure d420cd7efc5fb2675a2a2e3898975c57, server=b87883a7882d,44753,1733072737741 in 178 msec 2024-12-01T17:09:58,981 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:09:58,982 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened 3e8ea0e5b158654ef2cc1fdfe08d5ab3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63066335, jitterRate=-0.06023837625980377}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:09:58,982 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:58,982 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for 3e8ea0e5b158654ef2cc1fdfe08d5ab3: Running coprocessor pre-open hook at 1733072998952Writing region info on filesystem at 1733072998952Initializing all the Stores at 1733072998953 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733072998953Cleaning up temporary data from old regions at 1733072998974 (+21 ms)Running coprocessor post-open hooks at 1733072998982 (+8 ms)Region opened successfully at 1733072998982 2024-12-01T17:09:58,984 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3., pid=172, masterSystemTime=1733072998948 2024-12-01T17:09:58,986 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d420cd7efc5fb2675a2a2e3898975c57, ASSIGN in 339 msec 2024-12-01T17:09:58,986 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:09:58,986 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:09:58,987 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=3e8ea0e5b158654ef2cc1fdfe08d5ab3, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:09:58,990 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:09:58,997 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=170 2024-12-01T17:09:58,999 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3, server=b87883a7882d,36521,1733072737896 in 197 msec 2024-12-01T17:09:59,002 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-12-01T17:09:59,002 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3e8ea0e5b158654ef2cc1fdfe08d5ab3, ASSIGN in 358 msec 2024-12-01T17:09:59,004 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:09:59,005 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072999004"}]},"ts":"1733072999004"} 2024-12-01T17:09:59,009 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-01T17:09:59,010 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, 
state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:09:59,010 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-01T17:09:59,017 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-01T17:09:59,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:59,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:59,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:59,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:09:59,075 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:59,075 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:59,075 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:59,076 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:09:59,078 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 500 msec 2024-12-01T17:09:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-01T17:09:59,210 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-01T17:09:59,210 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. 
Timeout = 60000ms 2024-12-01T17:09:59,211 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:09:59,215 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-01T17:09:59,216 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:09:59,216 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-01T17:09:59,216 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-01T17:09:59,219 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-01T17:09:59,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072999219 (current time:1733072999219). 2024-12-01T17:09:59,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:09:59,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-01T17:09:59,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:09:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b24be90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:09:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:09:59,221 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:09:59,221 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:09:59,221 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:09:59,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74ff3b7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
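[Illustrative sketch, not part of the log] The Waiter/HBaseTestingUtil entries above wait up to 60,000 ms for all regions of the table to be assigned before the snapshot request is issued. The test utility does this by scanning meta and checking assignment-manager state; a rough client-side approximation (an assumption, not the utility's actual implementation) is to poll Admin.isTableAvailable with the same timeout:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    long deadline = System.currentTimeMillis() + 60_000L; // 60s, as in the log's Waiter
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // isTableAvailable returns true once every region of the table is assigned
      // and online; poll until then or until the timeout expires.
      while (!admin.isTableAvailable(table)) {
        if (System.currentTimeMillis() > deadline) {
          throw new IllegalStateException("Table not available within 60s: " + table);
        }
        Thread.sleep(250);
      }
    }
  }
}
```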
2024-12-01T17:09:59,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:09:59,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:09:59,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:59,223 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42718, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:09:59,224 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a4b4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:59,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:09:59,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:09:59,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:59,227 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:09:59,229 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 
2024-12-01T17:09:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:09:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:59,229 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:09:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eb24fa1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:09:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:09:59,231 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:09:59,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:09:59,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:09:59,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1027d73b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:59,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:09:59,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:09:59,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:59,233 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42736, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:09:59,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@625f7b7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:59,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:09:59,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:09:59,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:59,236 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43544, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:09:59,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:09:59,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:59,239 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45012, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:09:59,240 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 
2024-12-01T17:09:59,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:09:59,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:59,241 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:09:59,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:59,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-01T17:09:59,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
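[Illustrative sketch, not part of the log] The master entries above show the incoming snapshot request { ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, its ACL validation against PermissionStorage, and SnapshotManager deciding to attempt the snapshot; the entries that follow store SnapshotProcedure pid=174. A hedged sketch of the client call that produces such a request, using the public Admin API:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // For an enabled table this issues a flush-type snapshot, matching the
      // "type=FLUSH ttl=0" request logged by MasterRpcServices above. The call
      // blocks until the master's SnapshotProcedure completes.
      admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"));
    }
  }
}
```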
2024-12-01T17:09:59,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-01T17:09:59,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-01T17:09:59,244 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:09:59,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-01T17:09:59,245 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:09:59,247 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:09:59,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742212_1388 (size=170) 2024-12-01T17:09:59,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742212_1388 (size=170) 2024-12-01T17:09:59,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742212_1388 (size=170) 2024-12-01T17:09:59,261 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:09:59,261 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d420cd7efc5fb2675a2a2e3898975c57}] 2024-12-01T17:09:59,268 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:59,268 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:59,350 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-01T17:09:59,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-12-01T17:09:59,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-12-01T17:09:59,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 2024-12-01T17:09:59,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for d420cd7efc5fb2675a2a2e3898975c57: 2024-12-01T17:09:59,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-01T17:09:59,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-01T17:09:59,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:09:59,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:09:59,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:09:59,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for 3e8ea0e5b158654ef2cc1fdfe08d5ab3: 2024-12-01T17:09:59,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-01T17:09:59,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-01T17:09:59,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:09:59,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:09:59,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742214_1390 (size=71) 2024-12-01T17:09:59,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742214_1390 (size=71) 2024-12-01T17:09:59,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742214_1390 (size=71) 2024-12-01T17:09:59,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 2024-12-01T17:09:59,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-01T17:09:59,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-12-01T17:09:59,439 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:59,439 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:59,441 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d420cd7efc5fb2675a2a2e3898975c57 in 179 msec 2024-12-01T17:09:59,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742213_1389 (size=71) 2024-12-01T17:09:59,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742213_1389 (size=71) 2024-12-01T17:09:59,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742213_1389 (size=71) 2024-12-01T17:09:59,442 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 
2024-12-01T17:09:59,442 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-12-01T17:09:59,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-12-01T17:09:59,443 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:59,443 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:59,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=175, resume processing ppid=174 2024-12-01T17:09:59,446 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:09:59,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3 in 183 msec 2024-12-01T17:09:59,446 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:09:59,447 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:09:59,447 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-01T17:09:59,447 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-01T17:09:59,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742215_1391 (size=552) 2024-12-01T17:09:59,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742215_1391 (size=552) 2024-12-01T17:09:59,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742215_1391 (size=552) 2024-12-01T17:09:59,469 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:09:59,474 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:09:59,475 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-01T17:09:59,477 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:09:59,477 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-01T17:09:59,479 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 235 msec 2024-12-01T17:09:59,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-01T17:09:59,561 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-01T17:09:59,565 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='05a223317633a1c45f77bc51df44c8ff8', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:09:59,566 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='170be820d0a732e5be757ac74780f1931', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:09:59,569 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='2695dc994d0a8f55f17c0724652a540f9', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:09:59,570 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='32a07c85072bd2b40caeccd0a940a15dd', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57., 
hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:09:59,571 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='4d1c26e989b051048bf040f9e3255efc5', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:09:59,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36521 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:09:59,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44753 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:09:59,576 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-01T17:09:59,579 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-01T17:09:59,579 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:09:59,579 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:09:59,581 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-01T17:09:59,586 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-01T17:09:59,592 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-01T17:09:59,595 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-01T17:09:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733072999595 (current time:1733072999595). 
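
Editorial note, not part of the log: the two HRegion(8528) warnings above come from the test loading rows into both regions of testtb-testExportExpiredSnapshot with the WAL turned off. As a rough illustration only, a client-side put with that durability setting could look like the following Java sketch; the row key, qualifier and value are invented here, and a default client configuration on the classpath is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {  // hypothetical class name, for illustration only
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      // A put that skips the WAL, matching the "writing data to region ... with WAL
      // disabled" warning: faster, but the edit is lost if the region server crashes
      // before the memstore is flushed.
      Put put = new Put(Bytes.toBytes("row-0001"));          // invented row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}

SKIP_WAL trades durability for write speed, which is why the region server emits the "Data may be lost in the event of a crash" warning seen above.
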
2024-12-01T17:09:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:09:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-01T17:09:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:09:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@450dad33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:09:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:09:59,596 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:09:59,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:09:59,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:09:59,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bf1a162, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:59,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:09:59,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:09:59,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:59,598 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42758, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:09:59,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ab80a6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:59,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:09:59,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:09:59,600 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:59,600 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43556, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:09:59,602 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:09:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:09:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:59,602 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
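
Editorial note, not part of the log: the MasterRpcServices(1763) and SnapshotDescriptionUtils entries above record the master receiving a FLUSH-type snapshot request for snaptb0-testExportExpiredSnapshot and filling in the creation time, TTL, version and owner. A minimal sketch of issuing such a request through the public Admin API follows; the configuration source is assumed, and the master fills in the remaining description fields as logged.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestSketch {  // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Take a FLUSH-type snapshot of the online table; the call blocks until the
      // master-side SnapshotProcedure reports completion.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH));
    }
  }
}
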
2024-12-01T17:09:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@277472d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:59,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:09:59,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:09:59,603 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:09:59,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:09:59,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:09:59,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a6dbbf2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:59,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:09:59,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:09:59,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:59,605 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42774, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:09:59,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75582757, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:09:59,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:09:59,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:09:59,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:59,607 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43570, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
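
Editorial note, not part of the log: the ClusterIdFetcher and ConnectionRegistry entries above show each new client connection asking the master at b87883a7882d,38737 for the cluster id before creating RPC stubs. If one wanted to read the same cluster id explicitly from client code, a sketch using the real Admin API (surrounding class invented) would be:

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterIdSketch {  // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master for just the cluster id, the same value the connection
      // registry handshake returns to each new client in the entries above.
      ClusterMetrics metrics =
          admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.CLUSTER_ID));
      System.out.println("cluster id = " + metrics.getClusterId());
    }
  }
}
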
2024-12-01T17:09:59,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:09:59,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:09:59,610 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45022, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:09:59,611 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:09:59,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:09:59,611 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:59,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:09:59,612 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:09:59,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-01T17:09:59,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:09:59,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-01T17:09:59,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-01T17:09:59,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-01T17:09:59,615 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:09:59,616 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:09:59,618 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:09:59,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742216_1392 (size=165) 2024-12-01T17:09:59,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742216_1392 (size=165) 2024-12-01T17:09:59,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742216_1392 (size=165) 2024-12-01T17:09:59,625 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:09:59,625 INFO 
[PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d420cd7efc5fb2675a2a2e3898975c57}] 2024-12-01T17:09:59,626 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:59,626 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:59,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-01T17:09:59,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-01T17:09:59,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-01T17:09:59,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:09:59,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 
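
Editorial note, not part of the log: because the snapshot type is FLUSH, each SnapshotRegionCallable first flushes the region's memstore (the "Flushing ... column families" entries that follow) so the snapshot can reference on-disk store files. The same flush can also be forced from a client independently of a snapshot; a small sketch, again with an assumed default configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {  // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush all memstores of the table to HFiles, producing store files like the
      // .tmp/cf files committed in the entries below.
      admin.flush(TableName.valueOf("testtb-testExportExpiredSnapshot"));
    }
  }
}
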
2024-12-01T17:09:59,779 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing 3e8ea0e5b158654ef2cc1fdfe08d5ab3 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-01T17:09:59,779 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing d420cd7efc5fb2675a2a2e3898975c57 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-01T17:09:59,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/.tmp/cf/7a0a1d9fe46a43fa95219e7a58515ac3 is 71, key is 1588dc4c44be9a21b6f9069f99b9b34d/cf:q/1733072999575/Put/seqid=0 2024-12-01T17:09:59,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/.tmp/cf/4169bd2ac8e649c88c512687b1dbc395 is 71, key is 0079987bb63b1eb6ec186972d37ecc32/cf:q/1733072999574/Put/seqid=0 2024-12-01T17:09:59,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742217_1393 (size=5422) 2024-12-01T17:09:59,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742218_1394 (size=8188) 2024-12-01T17:09:59,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742217_1393 (size=5422) 2024-12-01T17:09:59,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742217_1393 (size=5422) 2024-12-01T17:09:59,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742218_1394 (size=8188) 2024-12-01T17:09:59,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742218_1394 (size=8188) 2024-12-01T17:09:59,801 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/.tmp/cf/7a0a1d9fe46a43fa95219e7a58515ac3 2024-12-01T17:09:59,801 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/.tmp/cf/4169bd2ac8e649c88c512687b1dbc395 2024-12-01T17:09:59,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/.tmp/cf/4169bd2ac8e649c88c512687b1dbc395 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/cf/4169bd2ac8e649c88c512687b1dbc395 2024-12-01T17:09:59,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/.tmp/cf/7a0a1d9fe46a43fa95219e7a58515ac3 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/cf/7a0a1d9fe46a43fa95219e7a58515ac3 2024-12-01T17:09:59,809 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/cf/7a0a1d9fe46a43fa95219e7a58515ac3, entries=45, sequenceid=6, filesize=8.0 K 2024-12-01T17:09:59,809 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/cf/4169bd2ac8e649c88c512687b1dbc395, entries=5, sequenceid=6, filesize=5.3 K 2024-12-01T17:09:59,809 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 3e8ea0e5b158654ef2cc1fdfe08d5ab3 in 31ms, sequenceid=6, compaction requested=false 2024-12-01T17:09:59,809 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for d420cd7efc5fb2675a2a2e3898975c57 in 31ms, sequenceid=6, compaction requested=false 2024-12-01T17:09:59,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-01T17:09:59,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-01T17:09:59,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for 3e8ea0e5b158654ef2cc1fdfe08d5ab3: 2024-12-01T17:09:59,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for d420cd7efc5fb2675a2a2e3898975c57: 2024-12-01T17:09:59,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): 
Snapshotting region testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. for snaptb0-testExportExpiredSnapshot completed. 2024-12-01T17:09:59,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. for snaptb0-testExportExpiredSnapshot completed. 2024-12-01T17:09:59,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-01T17:09:59,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-01T17:09:59,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:09:59,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:09:59,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/cf/4169bd2ac8e649c88c512687b1dbc395] hfiles 2024-12-01T17:09:59,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/cf/7a0a1d9fe46a43fa95219e7a58515ac3] hfiles 2024-12-01T17:09:59,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/cf/4169bd2ac8e649c88c512687b1dbc395 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-01T17:09:59,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/cf/7a0a1d9fe46a43fa95219e7a58515ac3 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-01T17:09:59,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742219_1395 (size=110) 2024-12-01T17:09:59,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742220_1396 (size=110) 2024-12-01T17:09:59,816 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742219_1395 (size=110) 2024-12-01T17:09:59,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742220_1396 (size=110) 2024-12-01T17:09:59,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742219_1395 (size=110) 2024-12-01T17:09:59,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742220_1396 (size=110) 2024-12-01T17:09:59,816 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 2024-12-01T17:09:59,816 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:09:59,816 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-01T17:09:59,816 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-01T17:09:59,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-01T17:09:59,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-01T17:09:59,816 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:59,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:59,817 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:09:59,817 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:09:59,818 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3 in 192 msec 2024-12-01T17:09:59,819 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-12-01T17:09:59,819 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d420cd7efc5fb2675a2a2e3898975c57 in 192 msec 2024-12-01T17:09:59,819 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:09:59,819 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:09:59,820 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:09:59,820 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-01T17:09:59,821 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-01T17:09:59,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742221_1397 (size=630) 2024-12-01T17:09:59,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742221_1397 (size=630) 2024-12-01T17:09:59,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742221_1397 (size=630) 2024-12-01T17:09:59,835 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:09:59,839 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:09:59,839 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-01T17:09:59,840 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:09:59,840 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, 
snapshot procedure id = 177 2024-12-01T17:09:59,841 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 228 msec 2024-12-01T17:09:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-01T17:09:59,932 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-01T17:09:59,935 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:09:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-01T17:09:59,940 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:09:59,940 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:09:59,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-12-01T17:09:59,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-01T17:09:59,942 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:09:59,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742222_1398 (size=400) 2024-12-01T17:09:59,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742222_1398 (size=400) 2024-12-01T17:09:59,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742222_1398 (size=400) 2024-12-01T17:09:59,953 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ced0bf6724c9afd6d078a6aaf8bb47a9, NAME => 'testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA 
=> {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:59,953 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 13964bd13cf62ff2c1f3f282d989070a, NAME => 'testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:09:59,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742224_1400 (size=61) 2024-12-01T17:09:59,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742223_1399 (size=61) 2024-12-01T17:09:59,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742223_1399 (size=61) 2024-12-01T17:09:59,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742224_1400 (size=61) 2024-12-01T17:09:59,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742223_1399 (size=61) 2024-12-01T17:09:59,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742224_1400 (size=61) 2024-12-01T17:09:59,961 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:59,961 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 13964bd13cf62ff2c1f3f282d989070a, disabling compactions & flushes 2024-12-01T17:09:59,961 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:09:59,961 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 
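
Editorial note, not part of the log: the HMaster$4(2454) entry above shows the test creating table testExportExpiredSnapshot with a single column family 'cf' (VERSIONS 1, ROW bloom filter, 64 KB block size) and two regions split at row key '1'. A hedged Java sketch of building an equivalent descriptor with the public builder API follows; only the attributes visible in the log are set, everything else is left at its default.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {  // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
          .setRegionReplication(1)                      // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                        // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)        // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)                  // BLOCKSIZE => '65536'
              .build());
      // Two regions split at row key '1', matching the STARTKEY/ENDKEY pairs in the log.
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}
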
2024-12-01T17:09:59,962 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 2024-12-01T17:09:59,962 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. after waiting 0 ms 2024-12-01T17:09:59,962 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing ced0bf6724c9afd6d078a6aaf8bb47a9, disabling compactions & flushes 2024-12-01T17:09:59,962 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 2024-12-01T17:09:59,962 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 2024-12-01T17:09:59,962 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 2024-12-01T17:09:59,962 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 2024-12-01T17:09:59,962 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. after waiting 0 ms 2024-12-01T17:09:59,962 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 13964bd13cf62ff2c1f3f282d989070a: Waiting for close lock at 1733072999961Disabling compacts and flushes for region at 1733072999961Disabling writes for close at 1733072999962 (+1 ms)Writing region close event to WAL at 1733072999962Closed at 1733072999962 2024-12-01T17:09:59,962 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 2024-12-01T17:09:59,962 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 
2024-12-01T17:09:59,962 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for ced0bf6724c9afd6d078a6aaf8bb47a9: Waiting for close lock at 1733072999962Disabling compacts and flushes for region at 1733072999962Disabling writes for close at 1733072999962Writing region close event to WAL at 1733072999962Closed at 1733072999962 2024-12-01T17:09:59,963 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:09:59,963 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733072999963"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072999963"}]},"ts":"1733072999963"} 2024-12-01T17:09:59,963 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733072999963"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733072999963"}]},"ts":"1733072999963"} 2024-12-01T17:09:59,964 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-01T17:09:59,965 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:09:59,965 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733072999965"}]},"ts":"1733072999965"} 2024-12-01T17:09:59,966 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-01T17:09:59,966 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:09:59,967 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:09:59,967 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:09:59,967 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:09:59,967 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:09:59,967 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:09:59,967 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:09:59,967 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:09:59,967 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:09:59,967 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:09:59,968 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:09:59,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure 
table=testExportExpiredSnapshot, region=ced0bf6724c9afd6d078a6aaf8bb47a9, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=13964bd13cf62ff2c1f3f282d989070a, ASSIGN}] 2024-12-01T17:09:59,969 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=13964bd13cf62ff2c1f3f282d989070a, ASSIGN 2024-12-01T17:09:59,969 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=ced0bf6724c9afd6d078a6aaf8bb47a9, ASSIGN 2024-12-01T17:09:59,969 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=13964bd13cf62ff2c1f3f282d989070a, ASSIGN; state=OFFLINE, location=b87883a7882d,41375,1733072737967; forceNewPlan=false, retain=false 2024-12-01T17:09:59,969 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=ced0bf6724c9afd6d078a6aaf8bb47a9, ASSIGN; state=OFFLINE, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:10:00,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-01T17:10:00,120 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
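
Editorial note, not part of the log: the TransitRegionStateProcedure ... ASSIGN entries above show the two new regions being handed to region servers b87883a7882d,36521 and b87883a7882d,41375. From a client, the resulting assignment can be observed through the RegionLocator; a rough sketch (invented class name, default configuration assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionAssignmentSketch {  // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // After CreateTableProcedure assigns the regions, each one should resolve to a
      // hosting region server, as the OpenRegionProcedure entries below confirm.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}
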
2024-12-01T17:10:00,120 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=ced0bf6724c9afd6d078a6aaf8bb47a9, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:10:00,120 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=13964bd13cf62ff2c1f3f282d989070a, regionState=OPENING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:10:00,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=ced0bf6724c9afd6d078a6aaf8bb47a9, ASSIGN because future has completed 2024-12-01T17:10:00,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure ced0bf6724c9afd6d078a6aaf8bb47a9, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:10:00,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=13964bd13cf62ff2c1f3f282d989070a, ASSIGN because future has completed 2024-12-01T17:10:00,123 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 13964bd13cf62ff2c1f3f282d989070a, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:10:00,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-01T17:10:00,279 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 2024-12-01T17:10:00,279 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => ced0bf6724c9afd6d078a6aaf8bb47a9, NAME => 'testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:10:00,279 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 2024-12-01T17:10:00,280 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => 13964bd13cf62ff2c1f3f282d989070a, NAME => 'testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T17:10:00,280 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. service=AccessControlService 2024-12-01T17:10:00,280 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 
service=AccessControlService 2024-12-01T17:10:00,280 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:10:00,280 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:10:00,280 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,280 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:10:00,280 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,280 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,280 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:10:00,281 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,281 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,281 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,282 INFO [StoreOpener-13964bd13cf62ff2c1f3f282d989070a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,282 INFO [StoreOpener-ced0bf6724c9afd6d078a6aaf8bb47a9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,284 INFO [StoreOpener-13964bd13cf62ff2c1f3f282d989070a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13964bd13cf62ff2c1f3f282d989070a columnFamilyName cf 2024-12-01T17:10:00,284 INFO [StoreOpener-ced0bf6724c9afd6d078a6aaf8bb47a9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ced0bf6724c9afd6d078a6aaf8bb47a9 columnFamilyName cf 2024-12-01T17:10:00,284 DEBUG [StoreOpener-13964bd13cf62ff2c1f3f282d989070a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:10:00,284 DEBUG [StoreOpener-ced0bf6724c9afd6d078a6aaf8bb47a9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:10:00,284 INFO [StoreOpener-13964bd13cf62ff2c1f3f282d989070a-1 {}] regionserver.HStore(327): Store=13964bd13cf62ff2c1f3f282d989070a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:10:00,284 INFO [StoreOpener-ced0bf6724c9afd6d078a6aaf8bb47a9-1 {}] regionserver.HStore(327): Store=ced0bf6724c9afd6d078a6aaf8bb47a9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:10:00,284 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,284 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,285 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,285 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,286 DEBUG 
[RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,286 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,286 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,286 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,286 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,286 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,288 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,288 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,290 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/13964bd13cf62ff2c1f3f282d989070a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:10:00,290 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/ced0bf6724c9afd6d078a6aaf8bb47a9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:10:00,291 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened 13964bd13cf62ff2c1f3f282d989070a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61014520, jitterRate=-0.0908128023147583}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:10:00,291 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened ced0bf6724c9afd6d078a6aaf8bb47a9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69799225, jitterRate=0.040089502930641174}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:10:00,291 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,291 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,291 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for 13964bd13cf62ff2c1f3f282d989070a: Running coprocessor pre-open hook at 1733073000281Writing region info on filesystem at 1733073000281Initializing all the Stores at 1733073000282 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733073000282Cleaning up temporary data from old regions at 1733073000286 (+4 ms)Running coprocessor post-open hooks at 1733073000291 (+5 ms)Region opened successfully at 1733073000291 2024-12-01T17:10:00,291 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for ced0bf6724c9afd6d078a6aaf8bb47a9: Running coprocessor pre-open hook at 1733073000281Writing region info on filesystem at 1733073000281Initializing all the Stores at 1733073000282 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733073000282Cleaning up temporary data from old regions at 1733073000286 (+4 ms)Running coprocessor post-open hooks at 1733073000291 (+5 ms)Region opened successfully at 1733073000291 2024-12-01T17:10:00,292 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9., pid=183, masterSystemTime=1733073000273 2024-12-01T17:10:00,292 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a., pid=184, masterSystemTime=1733073000275 2024-12-01T17:10:00,294 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 2024-12-01T17:10:00,294 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 
2024-12-01T17:10:00,294 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=ced0bf6724c9afd6d078a6aaf8bb47a9, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:10:00,294 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 2024-12-01T17:10:00,294 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 2024-12-01T17:10:00,295 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=13964bd13cf62ff2c1f3f282d989070a, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:10:00,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure ced0bf6724c9afd6d078a6aaf8bb47a9, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:10:00,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 13964bd13cf62ff2c1f3f282d989070a, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:10:00,297 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=181 2024-12-01T17:10:00,297 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure ced0bf6724c9afd6d078a6aaf8bb47a9, server=b87883a7882d,36521,1733072737896 in 174 msec 2024-12-01T17:10:00,298 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=ced0bf6724c9afd6d078a6aaf8bb47a9, ASSIGN in 329 msec 2024-12-01T17:10:00,298 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=182 2024-12-01T17:10:00,298 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure 13964bd13cf62ff2c1f3f282d989070a, server=b87883a7882d,41375,1733072737967 in 174 msec 2024-12-01T17:10:00,300 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=182, resume processing ppid=180 2024-12-01T17:10:00,300 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=13964bd13cf62ff2c1f3f282d989070a, ASSIGN in 330 msec 2024-12-01T17:10:00,300 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:10:00,300 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073000300"}]},"ts":"1733073000300"} 2024-12-01T17:10:00,302 INFO [PEWorker-5 {}] 
hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-01T17:10:00,302 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:10:00,302 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-01T17:10:00,305 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-01T17:10:00,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:00,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:00,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:00,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:00,446 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:00,446 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:00,446 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:00,446 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:00,447 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:00,447 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:00,447 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:00,447 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:00,448 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 510 msec 2024-12-01T17:10:00,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-01T17:10:00,571 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-12-01T17:10:00,571 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-01T17:10:00,571 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:10:00,578 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-01T17:10:00,578 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:10:00,578 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 2024-12-01T17:10:00,579 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-01T17:10:00,584 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='0ff03bb17e815d3d95ce58fa97bd7931e', locateType=CURRENT is [region=testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:10:00,585 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='1c0934b3288bb59c70770befae5dc069d', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:10:00,586 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='27de1919b7f12799306b0d7c1a6303a23', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:10:00,587 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='31b91095ecd46d539223e676fb7c88876', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:10:00,587 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched 
location of 'testExportExpiredSnapshot', row='41b78f7cb86a4c386ab342083cf8e89d0', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a., hostname=b87883a7882d,41375,1733072737967, seqNum=2] 2024-12-01T17:10:00,590 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36521 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:10:00,593 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41375 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:10:00,594 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-01T17:10:00,597 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-01T17:10:00,597 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 2024-12-01T17:10:00,597 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:10:00,598 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-01T17:10:00,602 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-01T17:10:00,607 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-01T17:10:00,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-01T17:10:00,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:10:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@749c1544, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:10:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:10:00,609 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:10:00,609 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:10:00,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:10:00,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f54b2da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:00,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:10:00,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:10:00,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:00,610 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42782, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:10:00,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cd344dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:00,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:10:00,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:10:00,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:00,612 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43572, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:00,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 
2024-12-01T17:10:00,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:10:00,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:00,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:00,613 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:10:00,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b01d4d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:00,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:10:00,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:10:00,614 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:10:00,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:10:00,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:10:00,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fafda39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:00,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:10:00,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:10:00,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:00,616 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42794, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:10:00,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70b61357, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:00,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:10:00,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:10:00,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:00,618 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43578, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:00,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:10:00,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:00,621 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45038, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:00,622 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 
2024-12-01T17:10:00,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:10:00,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:00,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:00,622 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:10:00,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-01T17:10:00,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-01T17:10:00,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-01T17:10:00,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-01T17:10:00,624 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:10:00,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-01T17:10:00,625 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:10:00,627 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:10:00,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742225_1401 (size=152) 2024-12-01T17:10:00,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742225_1401 (size=152) 2024-12-01T17:10:00,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742225_1401 (size=152) 2024-12-01T17:10:00,633 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:10:00,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ced0bf6724c9afd6d078a6aaf8bb47a9}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 13964bd13cf62ff2c1f3f282d989070a}] 2024-12-01T17:10:00,634 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,634 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,730 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-01T17:10:00,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-01T17:10:00,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41375 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-01T17:10:00,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 2024-12-01T17:10:00,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 2024-12-01T17:10:00,787 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing ced0bf6724c9afd6d078a6aaf8bb47a9 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-01T17:10:00,788 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing 13964bd13cf62ff2c1f3f282d989070a 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-01T17:10:00,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/13964bd13cf62ff2c1f3f282d989070a/.tmp/cf/be83fb22bf644ff19d532d0d6436b1cb is 71, key is 1c78421e6b9ede76d7ec8b75dde19e19/cf:q/1733073000593/Put/seqid=0 2024-12-01T17:10:00,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742226_1402 (size=8394) 2024-12-01T17:10:00,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742226_1402 (size=8394) 2024-12-01T17:10:00,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742226_1402 (size=8394) 2024-12-01T17:10:00,812 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/13964bd13cf62ff2c1f3f282d989070a/.tmp/cf/be83fb22bf644ff19d532d0d6436b1cb 2024-12-01T17:10:00,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/ced0bf6724c9afd6d078a6aaf8bb47a9/.tmp/cf/6f7c0df7798f486a9c1eb43472ee5c44 is 71, key is 01b2f4b660db1a023f1f5d50b1e0ce8c/cf:q/1733073000590/Put/seqid=0 2024-12-01T17:10:00,817 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/13964bd13cf62ff2c1f3f282d989070a/.tmp/cf/be83fb22bf644ff19d532d0d6436b1cb as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/13964bd13cf62ff2c1f3f282d989070a/cf/be83fb22bf644ff19d532d0d6436b1cb 2024-12-01T17:10:00,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742227_1403 (size=5216) 2024-12-01T17:10:00,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742227_1403 (size=5216) 2024-12-01T17:10:00,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742227_1403 (size=5216) 2024-12-01T17:10:00,819 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/ced0bf6724c9afd6d078a6aaf8bb47a9/.tmp/cf/6f7c0df7798f486a9c1eb43472ee5c44 2024-12-01T17:10:00,821 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/13964bd13cf62ff2c1f3f282d989070a/cf/be83fb22bf644ff19d532d0d6436b1cb, entries=48, sequenceid=5, filesize=8.2 K 2024-12-01T17:10:00,822 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 13964bd13cf62ff2c1f3f282d989070a in 35ms, sequenceid=5, compaction requested=false 2024-12-01T17:10:00,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-01T17:10:00,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for 13964bd13cf62ff2c1f3f282d989070a: 2024-12-01T17:10:00,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. for snapshot-testExportExpiredSnapshot completed. 2024-12-01T17:10:00,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-01T17:10:00,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:10:00,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/13964bd13cf62ff2c1f3f282d989070a/cf/be83fb22bf644ff19d532d0d6436b1cb] hfiles 2024-12-01T17:10:00,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/13964bd13cf62ff2c1f3f282d989070a/cf/be83fb22bf644ff19d532d0d6436b1cb for snapshot=snapshot-testExportExpiredSnapshot 2024-12-01T17:10:00,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/ced0bf6724c9afd6d078a6aaf8bb47a9/.tmp/cf/6f7c0df7798f486a9c1eb43472ee5c44 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/ced0bf6724c9afd6d078a6aaf8bb47a9/cf/6f7c0df7798f486a9c1eb43472ee5c44 2024-12-01T17:10:00,827 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/ced0bf6724c9afd6d078a6aaf8bb47a9/cf/6f7c0df7798f486a9c1eb43472ee5c44, entries=2, sequenceid=5, filesize=5.1 K 2024-12-01T17:10:00,828 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for ced0bf6724c9afd6d078a6aaf8bb47a9 in 41ms, sequenceid=5, compaction requested=false 2024-12-01T17:10:00,828 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for ced0bf6724c9afd6d078a6aaf8bb47a9: 2024-12-01T17:10:00,828 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. for snapshot-testExportExpiredSnapshot completed. 2024-12-01T17:10:00,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-01T17:10:00,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742228_1404 (size=103) 2024-12-01T17:10:00,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:10:00,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/ced0bf6724c9afd6d078a6aaf8bb47a9/cf/6f7c0df7798f486a9c1eb43472ee5c44] hfiles 2024-12-01T17:10:00,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742228_1404 (size=103) 2024-12-01T17:10:00,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/ced0bf6724c9afd6d078a6aaf8bb47a9/cf/6f7c0df7798f486a9c1eb43472ee5c44 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-01T17:10:00,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742228_1404 (size=103) 2024-12-01T17:10:00,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 
2024-12-01T17:10:00,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-01T17:10:00,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-12-01T17:10:00,830 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,830 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:10:00,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 13964bd13cf62ff2c1f3f282d989070a in 197 msec 2024-12-01T17:10:00,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742229_1405 (size=103) 2024-12-01T17:10:00,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742229_1405 (size=103) 2024-12-01T17:10:00,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742229_1405 (size=103) 2024-12-01T17:10:00,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 
2024-12-01T17:10:00,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-01T17:10:00,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-12-01T17:10:00,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,835 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:10:00,837 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=185 2024-12-01T17:10:00,837 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:10:00,837 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ced0bf6724c9afd6d078a6aaf8bb47a9 in 201 msec 2024-12-01T17:10:00,838 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:10:00,838 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:10:00,838 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-01T17:10:00,839 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-01T17:10:00,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742230_1406 (size=609) 2024-12-01T17:10:00,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742230_1406 (size=609) 2024-12-01T17:10:00,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742230_1406 (size=609) 2024-12-01T17:10:00,850 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:10:00,854 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:10:00,854 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-01T17:10:00,855 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:10:00,855 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-01T17:10:00,856 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 232 msec 2024-12-01T17:10:00,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-01T17:10:00,941 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-01T17:10:01,817 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0007_000001 (auth:SIMPLE) from 127.0.0.1:51916 2024-12-01T17:10:01,832 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_1/usercache/jenkins/appcache/application_1733072744857_0007/container_1733072744857_0007_01_000001/launch_container.sh] 2024-12-01T17:10:01,832 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_1/usercache/jenkins/appcache/application_1733072744857_0007/container_1733072744857_0007_01_000001/container_tokens] 2024-12-01T17:10:01,832 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_1/usercache/jenkins/appcache/application_1733072744857_0007/container_1733072744857_0007_01_000001/sysfs] 2024-12-01T17:10:03,466 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:10:05,859 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T17:10:07,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-01T17:10:07,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-01T17:10:07,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-01T17:10:07,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-01T17:10:07,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-01T17:10:07,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-01T17:10:10,953 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073010952 2024-12-01T17:10:10,953 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44451, tgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073010952, rawTgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073010952, srcFsUri=hdfs://localhost:44451, srcDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:10:10,982 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44451, inputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:10:10,982 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073010952, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073010952/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-01T17:10:10,984 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
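The export attempt logged above is driven from the test through org.apache.hadoop.util.ToolRunner (see the stack trace that follows), and the snapshot it targets was created with ttl=10 seconds roughly ten seconds earlier, so the verification step rejects it. Below is a minimal sketch of that TTL arithmetic, assuming only what is visible in this log (ttl in seconds, times in epoch milliseconds); the class and method names are illustrative, not ExportSnapshot internals.

// Illustrative sketch only (not ExportSnapshot's actual code): the TTL check that
// makes the export below fail with SnapshotTTLExpiredException. The snapshot was
// taken with ttl=10 (seconds) around 17:10:00 and the export runs at ~17:10:10.
public final class SnapshotTtlCheckSketch {

  // Returns true when a positive TTL (in seconds) has elapsed since the snapshot's creation time.
  static boolean isExpired(long ttlSeconds, long creationTimeMillis, long nowMillis) {
    if (ttlSeconds <= 0) {
      return false; // non-positive TTL is treated here as "never expires"
    }
    return creationTimeMillis + ttlSeconds * 1000L < nowMillis;
  }

  public static void main(String[] args) {
    long created = 1733073000856L; // approx. snapshot completion time taken from the log above
    long now = 1733073010984L;     // approx. time of this export attempt
    System.out.println(isExpired(10L, created, now)); // prints true, matching the failure below
  }
}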
2024-12-01T17:10:10,985 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool
org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired.
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?]
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?]
	at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
	at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T17:10:10,986 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportExpiredSnapshot
2024-12-01T17:10:10,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot
2024-12-01T17:10:10,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188
2024-12-01T17:10:10,988 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073010988"}]},"ts":"1733073010988"}
2024-12-01T17:10:10,989 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta
2024-12-01T17:10:10,989 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING
2024-12-01T17:10:10,991 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}]
2024-12-01T17:10:10,992 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3e8ea0e5b158654ef2cc1fdfe08d5ab3, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d420cd7efc5fb2675a2a2e3898975c57, UNASSIGN}]
2024-12-01T17:10:10,993 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d420cd7efc5fb2675a2a2e3898975c57, UNASSIGN
2024-12-01T17:10:10,993 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3e8ea0e5b158654ef2cc1fdfe08d5ab3, UNASSIGN
2024-12-01T17:10:10,993 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=d420cd7efc5fb2675a2a2e3898975c57, regionState=CLOSING, regionLocation=b87883a7882d,44753,1733072737741
2024-12-01T17:10:10,993 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=3e8ea0e5b158654ef2cc1fdfe08d5ab3, regionState=CLOSING, regionLocation=b87883a7882d,36521,1733072737896
2024-12-01T17:10:10,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d420cd7efc5fb2675a2a2e3898975c57, UNASSIGN because future has completed
2024-12-01T17:10:10,995 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit:
false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:10:10,995 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure d420cd7efc5fb2675a2a2e3898975c57, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:10:10,995 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3e8ea0e5b158654ef2cc1fdfe08d5ab3, UNASSIGN because future has completed 2024-12-01T17:10:10,995 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:10:10,995 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:10:11,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-01T17:10:11,147 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:10:11,148 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:10:11,148 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing d420cd7efc5fb2675a2a2e3898975c57, disabling compactions & flushes 2024-12-01T17:10:11,148 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 2024-12-01T17:10:11,148 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 2024-12-01T17:10:11,148 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. after waiting 0 ms 2024-12-01T17:10:11,148 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 
2024-12-01T17:10:11,149 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:10:11,149 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:10:11,149 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing 3e8ea0e5b158654ef2cc1fdfe08d5ab3, disabling compactions & flushes 2024-12-01T17:10:11,149 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:10:11,150 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:10:11,150 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. after waiting 0 ms 2024-12-01T17:10:11,150 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:10:11,156 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:10:11,156 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:10:11,156 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:10:11,156 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57. 
2024-12-01T17:10:11,157 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for d420cd7efc5fb2675a2a2e3898975c57: Waiting for close lock at 1733073011148Running coprocessor pre-close hooks at 1733073011148Disabling compacts and flushes for region at 1733073011148Disabling writes for close at 1733073011148Writing region close event to WAL at 1733073011150 (+2 ms)Running coprocessor post-close hooks at 1733073011156 (+6 ms)Closed at 1733073011156 2024-12-01T17:10:11,157 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:10:11,157 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3. 2024-12-01T17:10:11,157 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for 3e8ea0e5b158654ef2cc1fdfe08d5ab3: Waiting for close lock at 1733073011149Running coprocessor pre-close hooks at 1733073011149Disabling compacts and flushes for region at 1733073011149Disabling writes for close at 1733073011150 (+1 ms)Writing region close event to WAL at 1733073011151 (+1 ms)Running coprocessor post-close hooks at 1733073011157 (+6 ms)Closed at 1733073011157 2024-12-01T17:10:11,158 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:10:11,159 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=d420cd7efc5fb2675a2a2e3898975c57, regionState=CLOSED 2024-12-01T17:10:11,159 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed 3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:10:11,160 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=3e8ea0e5b158654ef2cc1fdfe08d5ab3, regionState=CLOSED 2024-12-01T17:10:11,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure d420cd7efc5fb2675a2a2e3898975c57, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:10:11,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:10:11,163 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=191 2024-12-01T17:10:11,163 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure d420cd7efc5fb2675a2a2e3898975c57, server=b87883a7882d,44753,1733072737741 in 166 msec 2024-12-01T17:10:11,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=190 2024-12-01T17:10:11,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=190, state=SUCCESS, 
hasLock=false; CloseRegionProcedure 3e8ea0e5b158654ef2cc1fdfe08d5ab3, server=b87883a7882d,36521,1733072737896 in 167 msec 2024-12-01T17:10:11,164 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d420cd7efc5fb2675a2a2e3898975c57, UNASSIGN in 171 msec 2024-12-01T17:10:11,165 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=189 2024-12-01T17:10:11,165 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3e8ea0e5b158654ef2cc1fdfe08d5ab3, UNASSIGN in 171 msec 2024-12-01T17:10:11,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-01T17:10:11,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 174 msec 2024-12-01T17:10:11,167 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073011167"}]},"ts":"1733073011167"} 2024-12-01T17:10:11,168 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-01T17:10:11,168 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-01T17:10:11,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 183 msec 2024-12-01T17:10:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-01T17:10:11,300 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-01T17:10:11,301 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,303 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,304 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,306 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in 
hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,307 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:10:11,307 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:10:11,309 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/recovered.edits] 2024-12-01T17:10:11,309 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/recovered.edits] 2024-12-01T17:10:11,314 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/cf/4169bd2ac8e649c88c512687b1dbc395 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/cf/4169bd2ac8e649c88c512687b1dbc395 2024-12-01T17:10:11,314 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/cf/7a0a1d9fe46a43fa95219e7a58515ac3 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/cf/7a0a1d9fe46a43fa95219e7a58515ac3 2024-12-01T17:10:11,317 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/recovered.edits/9.seqid 2024-12-01T17:10:11,317 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57/recovered.edits/9.seqid 2024-12-01T17:10:11,318 DEBUG [HFileArchiver-21 {}] 
backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3 2024-12-01T17:10:11,318 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportExpiredSnapshot/d420cd7efc5fb2675a2a2e3898975c57 2024-12-01T17:10:11,318 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-01T17:10:11,320 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,323 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-01T17:10:11,325 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-01T17:10:11,327 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,327 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-01T17:10:11,327 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733073011327"}]},"ts":"9223372036854775807"} 2024-12-01T17:10:11,327 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733073011327"}]},"ts":"9223372036854775807"} 2024-12-01T17:10:11,329 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-01T17:10:11,329 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 3e8ea0e5b158654ef2cc1fdfe08d5ab3, NAME => 'testtb-testExportExpiredSnapshot,,1733072998575.3e8ea0e5b158654ef2cc1fdfe08d5ab3.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => d420cd7efc5fb2675a2a2e3898975c57, NAME => 'testtb-testExportExpiredSnapshot,1,1733072998575.d420cd7efc5fb2675a2a2e3898975c57.', STARTKEY => '1', ENDKEY => ''}] 2024-12-01T17:10:11,329 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
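The HFileArchiver entries above show each store file and recovered.edits file being rebased from the table's data directory into the parallel archive layout before the region directories are deleted. The following is a small sketch of that path mapping only; the helper is hypothetical and is not the org.apache.hadoop.hbase.backup.HFileArchiver API.

import java.nio.file.Path;
import java.nio.file.Paths;

// Hypothetical helper illustrating the data -> archive path rebase visible in the
// HFileArchiver log entries above.
public final class ArchivePathSketch {

  // Rebases a store-file path from <root>/data/... to <root>/archive/data/...
  static Path toArchivePath(Path rootDir, Path storeFile) {
    Path relative = rootDir.relativize(storeFile);        // e.g. data/default/<table>/<region>/cf/<hfile>
    return rootDir.resolve("archive").resolve(relative);  // e.g. archive/data/default/<table>/<region>/cf/<hfile>
  }

  public static void main(String[] args) {
    Path root = Paths.get("/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546");
    Path hfile = root.resolve(
        "data/default/testtb-testExportExpiredSnapshot/3e8ea0e5b158654ef2cc1fdfe08d5ab3/cf/4169bd2ac8e649c88c512687b1dbc395");
    // Prints the archive location matching the "Archived from ... to ..." entry above.
    System.out.println(toArchivePath(root, hfile));
  }
}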
2024-12-01T17:10:11,329 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733073011329"}]},"ts":"9223372036854775807"} 2024-12-01T17:10:11,331 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-01T17:10:11,332 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,333 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 31 msec 2024-12-01T17:10:11,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-01T17:10:11,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-01T17:10:11,456 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-01T17:10:11,456 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-01T17:10:11,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,463 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:11,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:11,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:11,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:11,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-01T17:10:11,464 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:11,464 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:11,464 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:11,464 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:11,464 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-12-01T17:10:11,465 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-01T17:10:11,471 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-01T17:10:11,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-01T17:10:11,474 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snapshot-testExportExpiredSnapshot" type: 
DISABLED 2024-12-01T17:10:11,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-01T17:10:11,476 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-01T17:10:11,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-01T17:10:11,495 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=807 (was 819), OpenFileDescriptor=795 (was 825), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=564 (was 690), ProcessCount=14 (was 19), AvailableMemoryMB=3660 (was 3061) - AvailableMemoryMB LEAK? - 2024-12-01T17:10:11,495 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=807 is superior to 500 2024-12-01T17:10:11,510 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=807, OpenFileDescriptor=795, MaxFileDescriptor=1048576, SystemLoadAverage=564, ProcessCount=14, AvailableMemoryMB=3660 2024-12-01T17:10:11,510 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=807 is superior to 500 2024-12-01T17:10:11,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:10:11,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-01T17:10:11,513 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:10:11,513 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:10:11,513 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-12-01T17:10:11,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-01T17:10:11,514 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:10:11,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is 
added to blk_1073742231_1407 (size=412) 2024-12-01T17:10:11,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742231_1407 (size=412) 2024-12-01T17:10:11,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742231_1407 (size=412) 2024-12-01T17:10:11,522 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d3536aea36bbb99c4eea80f33b5f011d, NAME => 'testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:10:11,522 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 649c021579ce3859e4e17a5429c35c4d, NAME => 'testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:10:11,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742233_1409 (size=73) 2024-12-01T17:10:11,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742232_1408 (size=73) 2024-12-01T17:10:11,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742233_1409 (size=73) 2024-12-01T17:10:11,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742233_1409 (size=73) 2024-12-01T17:10:11,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742232_1408 (size=73) 2024-12-01T17:10:11,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742232_1408 (size=73) 2024-12-01T17:10:11,533 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 
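For reference, the table descriptor in the create request above ('testtb-testEmptyExportFileSystemState': a single 'cf' family with VERSIONS=1, ROW bloom filter, 64 KB block size, pre-split at key '1' into the two regions being initialized here) corresponds roughly to the following client-side Admin call. This is an illustrative sketch of an equivalent request, not the test's actual code.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative client-side equivalent of the create-table request logged above.
// Descriptor values mirror the log; how the test itself builds the table may differ.
public final class CreateTableSketch {
  static void createTable(Admin admin) throws IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
            .build())
        .build();
    // Split key '1' yields the two regions seen in the log: ('', '1') and ('1', '').
    admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
  }
}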
2024-12-01T17:10:11,533 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing d3536aea36bbb99c4eea80f33b5f011d, disabling compactions & flushes 2024-12-01T17:10:11,533 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:11,533 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:11,533 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. after waiting 0 ms 2024-12-01T17:10:11,533 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:11,533 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:11,533 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for d3536aea36bbb99c4eea80f33b5f011d: Waiting for close lock at 1733073011533Disabling compacts and flushes for region at 1733073011533Disabling writes for close at 1733073011533Writing region close event to WAL at 1733073011533Closed at 1733073011533 2024-12-01T17:10:11,534 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:10:11,534 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 649c021579ce3859e4e17a5429c35c4d, disabling compactions & flushes 2024-12-01T17:10:11,534 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 2024-12-01T17:10:11,534 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 2024-12-01T17:10:11,534 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. after waiting 0 ms 2024-12-01T17:10:11,534 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 2024-12-01T17:10:11,534 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 
2024-12-01T17:10:11,534 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 649c021579ce3859e4e17a5429c35c4d: Waiting for close lock at 1733073011534Disabling compacts and flushes for region at 1733073011534Disabling writes for close at 1733073011534Writing region close event to WAL at 1733073011534Closed at 1733073011534 2024-12-01T17:10:11,535 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:10:11,535 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733073011535"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733073011535"}]},"ts":"1733073011535"} 2024-12-01T17:10:11,535 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733073011535"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733073011535"}]},"ts":"1733073011535"} 2024-12-01T17:10:11,537 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-01T17:10:11,538 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:10:11,538 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073011538"}]},"ts":"1733073011538"} 2024-12-01T17:10:11,539 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-01T17:10:11,539 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:10:11,541 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:10:11,541 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:10:11,541 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:10:11,541 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:10:11,541 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:10:11,541 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:10:11,541 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:10:11,541 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:10:11,541 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:10:11,541 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:10:11,541 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d3536aea36bbb99c4eea80f33b5f011d, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=649c021579ce3859e4e17a5429c35c4d, ASSIGN}] 2024-12-01T17:10:11,542 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d3536aea36bbb99c4eea80f33b5f011d, ASSIGN 2024-12-01T17:10:11,542 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=649c021579ce3859e4e17a5429c35c4d, ASSIGN 2024-12-01T17:10:11,543 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d3536aea36bbb99c4eea80f33b5f011d, ASSIGN; state=OFFLINE, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:10:11,543 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=649c021579ce3859e4e17a5429c35c4d, ASSIGN; state=OFFLINE, location=b87883a7882d,44753,1733072737741; forceNewPlan=false, retain=false 2024-12-01T17:10:11,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-01T17:10:11,694 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-01T17:10:11,695 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=d3536aea36bbb99c4eea80f33b5f011d, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:10:11,695 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=649c021579ce3859e4e17a5429c35c4d, regionState=OPENING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:10:11,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d3536aea36bbb99c4eea80f33b5f011d, ASSIGN because future has completed 2024-12-01T17:10:11,699 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure d3536aea36bbb99c4eea80f33b5f011d, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:10:11,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=649c021579ce3859e4e17a5429c35c4d, ASSIGN because future has completed 2024-12-01T17:10:11,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 649c021579ce3859e4e17a5429c35c4d, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:10:11,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-01T17:10:11,858 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:11,858 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => d3536aea36bbb99c4eea80f33b5f011d, NAME => 'testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:10:11,858 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. service=AccessControlService 2024-12-01T17:10:11,859 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:10:11,859 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:11,859 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:10:11,859 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:11,859 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:11,859 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 2024-12-01T17:10:11,859 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => 649c021579ce3859e4e17a5429c35c4d, NAME => 'testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T17:10:11,860 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. service=AccessControlService 2024-12-01T17:10:11,860 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:10:11,861 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:11,861 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:10:11,861 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:11,861 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:11,861 INFO [StoreOpener-d3536aea36bbb99c4eea80f33b5f011d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:11,863 INFO [StoreOpener-d3536aea36bbb99c4eea80f33b5f011d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d3536aea36bbb99c4eea80f33b5f011d columnFamilyName cf 2024-12-01T17:10:11,863 DEBUG [StoreOpener-d3536aea36bbb99c4eea80f33b5f011d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:10:11,863 INFO [StoreOpener-649c021579ce3859e4e17a5429c35c4d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:11,864 INFO [StoreOpener-d3536aea36bbb99c4eea80f33b5f011d-1 {}] regionserver.HStore(327): Store=d3536aea36bbb99c4eea80f33b5f011d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:10:11,864 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:11,865 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:11,865 INFO [StoreOpener-649c021579ce3859e4e17a5429c35c4d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 649c021579ce3859e4e17a5429c35c4d columnFamilyName cf 2024-12-01T17:10:11,865 DEBUG [StoreOpener-649c021579ce3859e4e17a5429c35c4d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:10:11,865 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:11,865 INFO [StoreOpener-649c021579ce3859e4e17a5429c35c4d-1 {}] regionserver.HStore(327): Store=649c021579ce3859e4e17a5429c35c4d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:10:11,865 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:11,865 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:11,865 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:11,866 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:11,866 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:11,866 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:11,866 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for 649c021579ce3859e4e17a5429c35c4d 
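The StoreOpener and CompactionConfiguration entries above are the region servers materializing column family 'cf' with its default settings (VERSIONS=1, 64KB blocks, no compression, as echoed in the open journal below). A hedged sketch of reading that per-family configuration back from a client, assuming the same Admin connection as in the earlier sketch:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class DescribeFamily {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor td =
          admin.getDescriptor(TableName.valueOf("testtb-testEmptyExportFileSystemState"));
      ColumnFamilyDescriptor cf = td.getColumnFamily(Bytes.toBytes("cf"));
      // Prints the same family settings the store opener logs when the region comes online.
      System.out.println("maxVersions=" + cf.getMaxVersions()
          + " blocksize=" + cf.getBlocksize()
          + " compression=" + cf.getCompressionType());
    }
  }
}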
2024-12-01T17:10:11,867 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:11,867 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:11,868 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:10:11,868 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened d3536aea36bbb99c4eea80f33b5f011d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71323140, jitterRate=0.06279760599136353}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:10:11,868 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:11,869 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:10:11,869 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for d3536aea36bbb99c4eea80f33b5f011d: Running coprocessor pre-open hook at 1733073011859Writing region info on filesystem at 1733073011859Initializing all the Stores at 1733073011861 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733073011861Cleaning up temporary data from old regions at 1733073011865 (+4 ms)Running coprocessor post-open hooks at 1733073011868 (+3 ms)Region opened successfully at 1733073011869 (+1 ms) 2024-12-01T17:10:11,869 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened 649c021579ce3859e4e17a5429c35c4d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60648189, jitterRate=-0.0962715595960617}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:10:11,869 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:11,869 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for 649c021579ce3859e4e17a5429c35c4d: Running coprocessor 
pre-open hook at 1733073011861Writing region info on filesystem at 1733073011861Initializing all the Stores at 1733073011863 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733073011863Cleaning up temporary data from old regions at 1733073011866 (+3 ms)Running coprocessor post-open hooks at 1733073011869 (+3 ms)Region opened successfully at 1733073011869 2024-12-01T17:10:11,869 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d., pid=198, masterSystemTime=1733073011853 2024-12-01T17:10:11,870 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d., pid=199, masterSystemTime=1733073011855 2024-12-01T17:10:11,871 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 2024-12-01T17:10:11,871 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 2024-12-01T17:10:11,872 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=649c021579ce3859e4e17a5429c35c4d, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:10:11,872 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:11,872 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 
2024-12-01T17:10:11,872 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=d3536aea36bbb99c4eea80f33b5f011d, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:10:11,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 649c021579ce3859e4e17a5429c35c4d, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:10:11,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure d3536aea36bbb99c4eea80f33b5f011d, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:10:11,875 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=197 2024-12-01T17:10:11,875 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure 649c021579ce3859e4e17a5429c35c4d, server=b87883a7882d,44753,1733072737741 in 172 msec 2024-12-01T17:10:11,876 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=196 2024-12-01T17:10:11,876 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=649c021579ce3859e4e17a5429c35c4d, ASSIGN in 334 msec 2024-12-01T17:10:11,876 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure d3536aea36bbb99c4eea80f33b5f011d, server=b87883a7882d,36521,1733072737896 in 175 msec 2024-12-01T17:10:11,877 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=195 2024-12-01T17:10:11,877 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d3536aea36bbb99c4eea80f33b5f011d, ASSIGN in 335 msec 2024-12-01T17:10:11,878 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:10:11,878 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073011878"}]},"ts":"1733073011878"} 2024-12-01T17:10:11,879 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-01T17:10:11,880 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:10:11,880 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-01T17:10:11,882 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-01T17:10:11,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:11,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:11,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:11,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:11,939 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:11,939 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:11,939 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:11,939 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:11,940 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:11,940 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:11,940 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:11,940 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:11,941 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 427 msec 2024-12-01T17:10:12,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-01T17:10:12,141 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-01T17:10:12,142 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-12-01T17:10:12,142 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:10:12,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44753 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32778 bytes) of info 2024-12-01T17:10:12,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-01T17:10:12,153 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:10:12,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-01T17:10:12,153 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-01T17:10:12,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-01T17:10:12,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733073012155 (current time:1733073012155). 
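The snapshot request logged above ({ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }) is what the master receives when a client asks for a flush-type snapshot. A minimal sketch of the corresponding Admin call, assuming the snapshot and table names from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the SnapshotProcedure (pid=200 below) completes;
      // this two-argument overload defaults to a FLUSH-type snapshot.
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"));
    }
  }
}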
2024-12-01T17:10:12,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:10:12,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-01T17:10:12,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:10:12,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30307be7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:12,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:10:12,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:10:12,157 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:10:12,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:10:12,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:10:12,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3521b6cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:12,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:10:12,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:10:12,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:12,158 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55990, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:10:12,159 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@411f0c05, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:12,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:10:12,160 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:10:12,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:12,160 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47424, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:12,161 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 2024-12-01T17:10:12,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:10:12,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:12,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:12,162 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:10:12,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ce6e961, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:12,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:10:12,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:10:12,163 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:10:12,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:10:12,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:10:12,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b8fa1a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:12,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:10:12,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:10:12,164 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:12,164 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56000, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:10:12,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@526282aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:12,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:10:12,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:10:12,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:12,168 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47430, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
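The repeated "Start fetching meta region location from registry" / "fetched meta region location" pairs above come from the short-lived internal connections the master opens while validating the snapshot's ACLs; every client connection performs the same registry and hbase:meta lookups. A hedged sketch of making that lookup explicit for the test table via RegionLocator (row key illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegion {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
      // Resolves which region and region server host the given row, the same
      // information the AsyncNonMetaRegionLocator lines report further below.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("1"));
      System.out.println(loc.getRegion().getRegionNameAsString() + " @ " + loc.getServerName());
    }
  }
}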
2024-12-01T17:10:12,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:10:12,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:12,170 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40074, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:12,172 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 2024-12-01T17:10:12,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:10:12,172 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:12,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:12,172 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:10:12,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-01T17:10:12,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:10:12,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-01T17:10:12,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-01T17:10:12,175 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:10:12,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-01T17:10:12,176 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:10:12,178 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:10:12,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742234_1410 (size=185) 2024-12-01T17:10:12,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742234_1410 (size=185) 2024-12-01T17:10:12,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742234_1410 (size=185) 2024-12-01T17:10:12,185 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:10:12,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d3536aea36bbb99c4eea80f33b5f011d}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 649c021579ce3859e4e17a5429c35c4d}] 2024-12-01T17:10:12,186 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:12,186 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:12,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-01T17:10:12,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-12-01T17:10:12,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-12-01T17:10:12,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 2024-12-01T17:10:12,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:12,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for d3536aea36bbb99c4eea80f33b5f011d: 2024-12-01T17:10:12,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 649c021579ce3859e4e17a5429c35c4d: 2024-12-01T17:10:12,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-01T17:10:12,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-01T17:10:12,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:10:12,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:10:12,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:10:12,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:10:12,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742235_1411 (size=76) 2024-12-01T17:10:12,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742235_1411 (size=76) 2024-12-01T17:10:12,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742236_1412 (size=76) 2024-12-01T17:10:12,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742236_1412 (size=76) 2024-12-01T17:10:12,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742235_1411 (size=76) 2024-12-01T17:10:12,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742236_1412 (size=76) 2024-12-01T17:10:12,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:12,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-12-01T17:10:12,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 
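Each SnapshotRegionProcedure above stores only region-info and an empty hfile list in the manifest, since nothing has been flushed yet; the master consolidates and publishes the snapshot a few entries below. A hedged sketch of confirming the result from a client once the procedure reaches SUCCESS:

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // emptySnaptb0-testEmptyExportFileSystemState should be listed once the
      // SnapshotProcedure has moved it out of .hbase-snapshot/.tmp.
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      for (SnapshotDescription sd : snapshots) {
        System.out.println(sd.getName() + " -> " + sd.getTableName());
      }
    }
  }
}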
2024-12-01T17:10:12,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-12-01T17:10:12,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-12-01T17:10:12,353 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:12,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-12-01T17:10:12,353 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:12,353 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:12,353 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:12,355 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d3536aea36bbb99c4eea80f33b5f011d in 169 msec 2024-12-01T17:10:12,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=200 2024-12-01T17:10:12,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 649c021579ce3859e4e17a5429c35c4d in 169 msec 2024-12-01T17:10:12,357 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:10:12,357 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:10:12,358 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:10:12,358 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,359 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,365 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742237_1413 (size=567) 2024-12-01T17:10:12,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742237_1413 (size=567) 2024-12-01T17:10:12,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742237_1413 (size=567) 2024-12-01T17:10:12,368 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:10:12,372 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:10:12,372 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,374 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:10:12,374 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-01T17:10:12,375 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 201 msec 2024-12-01T17:10:12,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-01T17:10:12,492 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-01T17:10:12,501 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='0ff96dbe5a3eeab9a6cddebf2a199e04e', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:10:12,502 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', 
row='1fe2165928bbaf3ca83fcd2e70fec82f1', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:10:12,503 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='2a2bffc56663c3b99f1a93825e77e9e0c', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:10:12,503 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='3a20c7f3634f30a58b8a993f45c1f2e65', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:10:12,504 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='4f707c86e1936e975c88d98bbd970e98e', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:10:12,508 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36521 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:10:12,509 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44753 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:10:12,510 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-01T17:10:12,512 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-01T17:10:12,512 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 
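
The HRegion(8528) warnings above ("writing data to region ... with WAL disabled") come from client writes issued with WAL durability turned off. A minimal sketch of such a write using the public HBase client API follows; the table name, column family 'cf', qualifier 'q', and the row key are taken from the log, while the value and the client configuration are assumed for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
      // Row key as seen in the HFileWriterImpl entry below; the value is hypothetical.
      Put put = new Put(Bytes.toBytes("0286004b9e503d8ce368cfab5d13d29d"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL is what produces the "Data may be lost in the event of a crash" warning.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}

Writes like this stay only in the memstore until the snapshot-triggered flush recorded later in the log persists them as hfiles.
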
2024-12-01T17:10:12,512 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:10:12,513 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-01T17:10:12,517 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-01T17:10:12,521 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-01T17:10:12,522 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-01T17:10:12,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733073012522 (current time:1733073012522). 2024-12-01T17:10:12,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:10:12,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-01T17:10:12,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:10:12,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51d173b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:12,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:10:12,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:10:12,524 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:10:12,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:10:12,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:10:12,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47519044, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:12,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:10:12,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:10:12,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:12,525 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56010, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:10:12,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@375a6f41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:12,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:10:12,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:10:12,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:12,527 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47442, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:12,527 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 
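
The ClusterIdFetcher, ConnectionRegistry and meta-location entries above trace the bootstrap that a short-lived master-side connection performs: fetch the cluster id from the connection registry, then the hbase:meta region location, then open a ClientService connection. A rough client-side equivalent using only the public API is sketched below; the table name comes from the log, everything else is an assumption for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionBootstrapExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
      // Cluster id, the value the connection registry returns in the entries above.
      String clusterId = admin.getClusterMetrics().getClusterId();
      // Region location lookup, analogous to the AsyncNonMetaRegionLocator entries.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("1"));
      System.out.println(clusterId + " -> " + loc.getServerName());
    }
  }
}
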
2024-12-01T17:10:12,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:10:12,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:12,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:12,528 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:10:12,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ccc0627, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:12,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:10:12,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:10:12,529 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:10:12,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:10:12,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:10:12,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72a4b9dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:12,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:10:12,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:10:12,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:12,530 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56028, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:10:12,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3805c37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:12,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:10:12,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:10:12,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:12,533 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47456, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:12,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:10:12,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:12,536 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40076, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:12,537 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 
2024-12-01T17:10:12,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:10:12,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:12,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:12,537 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:10:12,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-01T17:10:12,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
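
The MasterRpcServices(1763) request above ({ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }) is the kind of call the client Admin snapshot API issues. A minimal sketch, assuming a standard client configuration, is shown here; the snapshot and table names are taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot, matching the snaptb0-testEmptyExportFileSystemState request.
      // The call blocks until the master's SnapshotProcedure (pid=203 below) completes.
      admin.snapshot("snaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}

The procedure states that follow (SNAPSHOT_PREPARE through SNAPSHOT_POST_OPERATION) are the master-side execution of that single client call.
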
2024-12-01T17:10:12,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-01T17:10:12,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-01T17:10:12,539 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:10:12,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-01T17:10:12,540 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:10:12,543 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:10:12,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742238_1414 (size=180) 2024-12-01T17:10:12,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742238_1414 (size=180) 2024-12-01T17:10:12,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742238_1414 (size=180) 2024-12-01T17:10:12,549 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:10:12,549 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d3536aea36bbb99c4eea80f33b5f011d}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 649c021579ce3859e4e17a5429c35c4d}] 2024-12-01T17:10:12,550 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:12,550 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:12,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-01T17:10:12,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-01T17:10:12,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-01T17:10:12,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 2024-12-01T17:10:12,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:12,704 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing 649c021579ce3859e4e17a5429c35c4d 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-01T17:10:12,704 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing d3536aea36bbb99c4eea80f33b5f011d 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-01T17:10:12,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/.tmp/cf/ae3bc18c29404937bcf0887a087404ef is 71, key is 0286004b9e503d8ce368cfab5d13d29d/cf:q/1733073012508/Put/seqid=0 2024-12-01T17:10:12,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/.tmp/cf/2c556f6214464a3bb5570cce1d59da51 is 71, key is 13754b00f1b4b1c9cb36232ed25c7b3b/cf:q/1733073012509/Put/seqid=0 2024-12-01T17:10:12,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742240_1416 (size=5492) 2024-12-01T17:10:12,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742240_1416 (size=5492) 2024-12-01T17:10:12,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742240_1416 (size=5492) 2024-12-01T17:10:12,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742239_1415 (size=8120) 2024-12-01T17:10:12,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742239_1415 
(size=8120) 2024-12-01T17:10:12,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742239_1415 (size=8120) 2024-12-01T17:10:12,729 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/.tmp/cf/ae3bc18c29404937bcf0887a087404ef 2024-12-01T17:10:12,729 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/.tmp/cf/2c556f6214464a3bb5570cce1d59da51 2024-12-01T17:10:12,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/.tmp/cf/ae3bc18c29404937bcf0887a087404ef as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/cf/ae3bc18c29404937bcf0887a087404ef 2024-12-01T17:10:12,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/.tmp/cf/2c556f6214464a3bb5570cce1d59da51 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/cf/2c556f6214464a3bb5570cce1d59da51 2024-12-01T17:10:12,737 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/cf/ae3bc18c29404937bcf0887a087404ef, entries=6, sequenceid=6, filesize=5.4 K 2024-12-01T17:10:12,737 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/cf/2c556f6214464a3bb5570cce1d59da51, entries=44, sequenceid=6, filesize=7.9 K 2024-12-01T17:10:12,738 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for d3536aea36bbb99c4eea80f33b5f011d in 34ms, sequenceid=6, compaction requested=false 2024-12-01T17:10:12,738 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] 
regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 649c021579ce3859e4e17a5429c35c4d in 34ms, sequenceid=6, compaction requested=false 2024-12-01T17:10:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-01T17:10:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-01T17:10:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for d3536aea36bbb99c4eea80f33b5f011d: 2024-12-01T17:10:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for 649c021579ce3859e4e17a5429c35c4d: 2024-12-01T17:10:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-01T17:10:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-01T17:10:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:10:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/cf/ae3bc18c29404937bcf0887a087404ef] hfiles 2024-12-01T17:10:12,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:10:12,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/cf/ae3bc18c29404937bcf0887a087404ef for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/cf/2c556f6214464a3bb5570cce1d59da51] hfiles 2024-12-01T17:10:12,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/cf/2c556f6214464a3bb5570cce1d59da51 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742242_1418 (size=115) 2024-12-01T17:10:12,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742242_1418 (size=115) 2024-12-01T17:10:12,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742242_1418 (size=115) 2024-12-01T17:10:12,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742241_1417 (size=115) 2024-12-01T17:10:12,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742241_1417 (size=115) 2024-12-01T17:10:12,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742241_1417 (size=115) 2024-12-01T17:10:12,752 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 
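
At this point the per-region manifests and hfile references have been written; once the parent SnapshotProcedure finishes a few entries below, both snapshots are visible to clients. A small sketch of listing them through the Admin API follows, with the client configuration assumed for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Both emptySnaptb0- and snaptb0-testEmptyExportFileSystemState should appear here
      // once their SnapshotProcedures report SUCCESS.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " table=" + sd.getTableName());
      }
    }
  }
}
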
2024-12-01T17:10:12,752 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:12,752 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-01T17:10:12,752 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-01T17:10:12,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-12-01T17:10:12,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-12-01T17:10:12,753 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:12,753 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:12,753 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:12,753 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:12,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d3536aea36bbb99c4eea80f33b5f011d in 205 msec 2024-12-01T17:10:12,756 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-12-01T17:10:12,756 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 649c021579ce3859e4e17a5429c35c4d in 205 msec 2024-12-01T17:10:12,756 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:10:12,756 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:10:12,757 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:10:12,757 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,757 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742243_1419 (size=645) 2024-12-01T17:10:12,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742243_1419 (size=645) 2024-12-01T17:10:12,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742243_1419 (size=645) 2024-12-01T17:10:12,766 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:10:12,770 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:10:12,770 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,771 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:10:12,771 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-01T17:10:12,772 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 233 msec 2024-12-01T17:10:12,833 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:10:12,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-01T17:10:12,860 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table 
Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-01T17:10:12,860 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073012860 2024-12-01T17:10:12,860 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44451, tgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073012860, rawTgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073012860, srcFsUri=hdfs://localhost:44451, srcDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:10:12,884 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44451, inputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:10:12,884 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073012860, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073012860/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,886 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-01T17:10:12,889 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073012860/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:12,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742245_1421 (size=567) 2024-12-01T17:10:12,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742245_1421 (size=567) 2024-12-01T17:10:12,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742244_1420 (size=185) 2024-12-01T17:10:12,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742245_1421 (size=567) 2024-12-01T17:10:12,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742244_1420 (size=185) 2024-12-01T17:10:12,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742244_1420 (size=185) 2024-12-01T17:10:12,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:12,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:12,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:13,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-14954452219184371471.jar 2024-12-01T17:10:13,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:13,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:13,741 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-10697531344814146596.jar 2024-12-01T17:10:13,741 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:13,741 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:13,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:13,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:13,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:13,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:13,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-01T17:10:13,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-01T17:10:13,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-01T17:10:13,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-01T17:10:13,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-01T17:10:13,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-01T17:10:13,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-01T17:10:13,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-01T17:10:13,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-01T17:10:13,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-01T17:10:13,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-01T17:10:13,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:10:13,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:10:13,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:10:13,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:10:13,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:10:13,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:10:13,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:10:13,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742246_1422 (size=24020) 2024-12-01T17:10:13,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742246_1422 (size=24020) 2024-12-01T17:10:13,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742246_1422 (size=24020) 2024-12-01T17:10:13,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742247_1423 (size=77755) 2024-12-01T17:10:13,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742247_1423 (size=77755) 2024-12-01T17:10:13,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742247_1423 (size=77755) 2024-12-01T17:10:13,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742248_1424 (size=131360) 2024-12-01T17:10:13,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742248_1424 (size=131360) 2024-12-01T17:10:13,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is 
added to blk_1073742248_1424 (size=131360) 2024-12-01T17:10:13,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742249_1425 (size=111793) 2024-12-01T17:10:13,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742249_1425 (size=111793) 2024-12-01T17:10:13,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742249_1425 (size=111793) 2024-12-01T17:10:13,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742250_1426 (size=1832290) 2024-12-01T17:10:13,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742250_1426 (size=1832290) 2024-12-01T17:10:13,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742250_1426 (size=1832290) 2024-12-01T17:10:13,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742251_1427 (size=8360005) 2024-12-01T17:10:13,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742251_1427 (size=8360005) 2024-12-01T17:10:13,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742251_1427 (size=8360005) 2024-12-01T17:10:13,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742252_1428 (size=503880) 2024-12-01T17:10:13,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742252_1428 (size=503880) 2024-12-01T17:10:13,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742252_1428 (size=503880) 2024-12-01T17:10:13,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742253_1429 (size=6424742) 2024-12-01T17:10:13,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742253_1429 (size=6424742) 2024-12-01T17:10:13,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742253_1429 (size=6424742) 2024-12-01T17:10:13,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742254_1430 (size=440956) 2024-12-01T17:10:13,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742254_1430 (size=440956) 2024-12-01T17:10:13,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742254_1430 (size=440956) 2024-12-01T17:10:13,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742255_1431 (size=322274) 2024-12-01T17:10:13,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36379 is added to blk_1073742255_1431 (size=322274) 2024-12-01T17:10:13,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742255_1431 (size=322274) 2024-12-01T17:10:13,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742256_1432 (size=20406) 2024-12-01T17:10:13,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742256_1432 (size=20406) 2024-12-01T17:10:13,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742256_1432 (size=20406) 2024-12-01T17:10:13,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742257_1433 (size=45609) 2024-12-01T17:10:13,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742257_1433 (size=45609) 2024-12-01T17:10:13,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742257_1433 (size=45609) 2024-12-01T17:10:13,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742258_1434 (size=136454) 2024-12-01T17:10:13,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742258_1434 (size=136454) 2024-12-01T17:10:13,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742258_1434 (size=136454) 2024-12-01T17:10:13,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742259_1435 (size=1597136) 2024-12-01T17:10:13,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742259_1435 (size=1597136) 2024-12-01T17:10:13,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742259_1435 (size=1597136) 2024-12-01T17:10:13,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742260_1436 (size=30873) 2024-12-01T17:10:13,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742260_1436 (size=30873) 2024-12-01T17:10:13,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742260_1436 (size=30873) 2024-12-01T17:10:13,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742261_1437 (size=29229) 2024-12-01T17:10:13,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742261_1437 (size=29229) 2024-12-01T17:10:13,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742261_1437 (size=29229) 2024-12-01T17:10:13,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36379 is added to blk_1073742262_1438 (size=903864) 2024-12-01T17:10:13,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742262_1438 (size=903864) 2024-12-01T17:10:13,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742262_1438 (size=903864) 2024-12-01T17:10:13,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742263_1439 (size=5175431) 2024-12-01T17:10:13,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742263_1439 (size=5175431) 2024-12-01T17:10:13,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742263_1439 (size=5175431) 2024-12-01T17:10:13,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742264_1440 (size=232881) 2024-12-01T17:10:13,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742264_1440 (size=232881) 2024-12-01T17:10:13,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742264_1440 (size=232881) 2024-12-01T17:10:13,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742265_1441 (size=1323991) 2024-12-01T17:10:13,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742265_1441 (size=1323991) 2024-12-01T17:10:13,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742265_1441 (size=1323991) 2024-12-01T17:10:13,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742266_1442 (size=4695811) 2024-12-01T17:10:13,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742266_1442 (size=4695811) 2024-12-01T17:10:13,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742266_1442 (size=4695811) 2024-12-01T17:10:13,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742267_1443 (size=1877034) 2024-12-01T17:10:13,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742267_1443 (size=1877034) 2024-12-01T17:10:13,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742267_1443 (size=1877034) 2024-12-01T17:10:14,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742268_1444 (size=217555) 2024-12-01T17:10:14,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742268_1444 (size=217555) 2024-12-01T17:10:14,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742268_1444 (size=217555) 2024-12-01T17:10:14,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742269_1445 (size=4188619) 2024-12-01T17:10:14,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742269_1445 (size=4188619) 2024-12-01T17:10:14,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742269_1445 (size=4188619) 2024-12-01T17:10:14,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742270_1446 (size=127628) 2024-12-01T17:10:14,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742270_1446 (size=127628) 2024-12-01T17:10:14,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742270_1446 (size=127628) 2024-12-01T17:10:14,019 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-01T17:10:14,021 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-01T17:10:14,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742271_1447 (size=7) 2024-12-01T17:10:14,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742271_1447 (size=7) 2024-12-01T17:10:14,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742271_1447 (size=7) 2024-12-01T17:10:14,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742272_1448 (size=10) 2024-12-01T17:10:14,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742272_1448 (size=10) 2024-12-01T17:10:14,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742272_1448 (size=10) 2024-12-01T17:10:14,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742273_1449 (size=303988) 2024-12-01T17:10:14,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742273_1449 (size=303988) 2024-12-01T17:10:14,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742273_1449 (size=303988) 2024-12-01T17:10:14,063 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-01T17:10:14,063 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-01T17:10:14,771 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0008_000001 (auth:SIMPLE) from 127.0.0.1:36030 2024-12-01T17:10:17,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-01T17:10:17,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-01T17:10:17,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-01T17:10:18,874 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0008_000001 (auth:SIMPLE) from 127.0.0.1:54498 2024-12-01T17:10:19,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742274_1450 (size=349662) 2024-12-01T17:10:19,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742274_1450 (size=349662) 2024-12-01T17:10:19,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742274_1450 (size=349662) 2024-12-01T17:10:20,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742275_1451 (size=8568) 2024-12-01T17:10:20,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742275_1451 (size=8568) 2024-12-01T17:10:20,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742275_1451 (size=8568) 2024-12-01T17:10:20,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742276_1452 (size=460) 2024-12-01T17:10:20,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742276_1452 (size=460) 2024-12-01T17:10:20,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742276_1452 (size=460) 2024-12-01T17:10:20,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742277_1453 (size=8568) 2024-12-01T17:10:20,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742277_1453 (size=8568) 2024-12-01T17:10:20,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742277_1453 (size=8568) 
2024-12-01T17:10:20,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742278_1454 (size=349662) 2024-12-01T17:10:20,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742278_1454 (size=349662) 2024-12-01T17:10:20,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742278_1454 (size=349662) 2024-12-01T17:10:21,158 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-01T17:10:21,158 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-01T17:10:21,162 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:21,162 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-01T17:10:21,162 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-01T17:10:21,162 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:21,163 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-01T17:10:21,163 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-01T17:10:21,163 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073012860/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073012860/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:21,163 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073012860/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-01T17:10:21,163 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073012860/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-01T17:10:21,167 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): 
Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-01T17:10:21,170 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073021170"}]},"ts":"1733073021170"} 2024-12-01T17:10:21,172 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-01T17:10:21,172 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-01T17:10:21,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-01T17:10:21,174 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d3536aea36bbb99c4eea80f33b5f011d, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=649c021579ce3859e4e17a5429c35c4d, UNASSIGN}] 2024-12-01T17:10:21,175 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=649c021579ce3859e4e17a5429c35c4d, UNASSIGN 2024-12-01T17:10:21,175 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d3536aea36bbb99c4eea80f33b5f011d, UNASSIGN 2024-12-01T17:10:21,176 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=649c021579ce3859e4e17a5429c35c4d, regionState=CLOSING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:10:21,176 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=d3536aea36bbb99c4eea80f33b5f011d, regionState=CLOSING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:10:21,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d3536aea36bbb99c4eea80f33b5f011d, UNASSIGN because future has completed 2024-12-01T17:10:21,177 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:10:21,177 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure d3536aea36bbb99c4eea80f33b5f011d, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:10:21,178 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=649c021579ce3859e4e17a5429c35c4d, UNASSIGN because future has completed 2024-12-01T17:10:21,178 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:10:21,178 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 649c021579ce3859e4e17a5429c35c4d, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:10:21,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-01T17:10:21,331 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:21,331 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:10:21,331 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing d3536aea36bbb99c4eea80f33b5f011d, disabling compactions & flushes 2024-12-01T17:10:21,332 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:21,332 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:21,332 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:21,332 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. after waiting 0 ms 2024-12-01T17:10:21,332 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:21,332 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:10:21,332 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing 649c021579ce3859e4e17a5429c35c4d, disabling compactions & flushes 2024-12-01T17:10:21,332 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 
2024-12-01T17:10:21,332 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 2024-12-01T17:10:21,332 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. after waiting 0 ms 2024-12-01T17:10:21,333 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 2024-12-01T17:10:21,339 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:10:21,339 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:10:21,340 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:10:21,340 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d. 2024-12-01T17:10:21,340 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:10:21,340 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for d3536aea36bbb99c4eea80f33b5f011d: Waiting for close lock at 1733073021331Running coprocessor pre-close hooks at 1733073021331Disabling compacts and flushes for region at 1733073021331Disabling writes for close at 1733073021332 (+1 ms)Writing region close event to WAL at 1733073021334 (+2 ms)Running coprocessor post-close hooks at 1733073021339 (+5 ms)Closed at 1733073021340 (+1 ms) 2024-12-01T17:10:21,340 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d. 
2024-12-01T17:10:21,340 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for 649c021579ce3859e4e17a5429c35c4d: Waiting for close lock at 1733073021332Running coprocessor pre-close hooks at 1733073021332Disabling compacts and flushes for region at 1733073021332Disabling writes for close at 1733073021332Writing region close event to WAL at 1733073021334 (+2 ms)Running coprocessor post-close hooks at 1733073021340 (+6 ms)Closed at 1733073021340 2024-12-01T17:10:21,341 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:21,342 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=d3536aea36bbb99c4eea80f33b5f011d, regionState=CLOSED 2024-12-01T17:10:21,342 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed 649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:21,342 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=649c021579ce3859e4e17a5429c35c4d, regionState=CLOSED 2024-12-01T17:10:21,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure d3536aea36bbb99c4eea80f33b5f011d, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:10:21,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 649c021579ce3859e4e17a5429c35c4d, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:10:21,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=208 2024-12-01T17:10:21,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure d3536aea36bbb99c4eea80f33b5f011d, server=b87883a7882d,36521,1733072737896 in 170 msec 2024-12-01T17:10:21,350 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=209 2024-12-01T17:10:21,350 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure 649c021579ce3859e4e17a5429c35c4d, server=b87883a7882d,44753,1733072737741 in 170 msec 2024-12-01T17:10:21,350 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=d3536aea36bbb99c4eea80f33b5f011d, UNASSIGN in 175 msec 2024-12-01T17:10:21,351 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=207 2024-12-01T17:10:21,351 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=649c021579ce3859e4e17a5429c35c4d, UNASSIGN in 176 msec 2024-12-01T17:10:21,353 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-12-01T17:10:21,353 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 179 msec 2024-12-01T17:10:21,354 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073021354"}]},"ts":"1733073021354"} 2024-12-01T17:10:21,355 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-01T17:10:21,355 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-01T17:10:21,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 188 msec 2024-12-01T17:10:21,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-01T17:10:21,490 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-01T17:10:21,491 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,492 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,493 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,495 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,496 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:21,496 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:21,498 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/cf, FileablePath, 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/recovered.edits] 2024-12-01T17:10:21,498 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/recovered.edits] 2024-12-01T17:10:21,501 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/cf/2c556f6214464a3bb5570cce1d59da51 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/cf/2c556f6214464a3bb5570cce1d59da51 2024-12-01T17:10:21,501 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/cf/ae3bc18c29404937bcf0887a087404ef to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/cf/ae3bc18c29404937bcf0887a087404ef 2024-12-01T17:10:21,503 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d/recovered.edits/9.seqid 2024-12-01T17:10:21,503 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d/recovered.edits/9.seqid 2024-12-01T17:10:21,503 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/649c021579ce3859e4e17a5429c35c4d 2024-12-01T17:10:21,503 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testEmptyExportFileSystemState/d3536aea36bbb99c4eea80f33b5f011d 2024-12-01T17:10:21,503 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-01T17:10:21,505 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure 
table=testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,507 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-01T17:10:21,525 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-01T17:10:21,526 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,526 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-01T17:10:21,526 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733073021526"}]},"ts":"9223372036854775807"} 2024-12-01T17:10:21,526 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733073021526"}]},"ts":"9223372036854775807"} 2024-12-01T17:10:21,529 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-01T17:10:21,529 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d3536aea36bbb99c4eea80f33b5f011d, NAME => 'testtb-testEmptyExportFileSystemState,,1733073011511.d3536aea36bbb99c4eea80f33b5f011d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 649c021579ce3859e4e17a5429c35c4d, NAME => 'testtb-testEmptyExportFileSystemState,1,1733073011511.649c021579ce3859e4e17a5429c35c4d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-01T17:10:21,529 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
2024-12-01T17:10:21,529 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733073021529"}]},"ts":"9223372036854775807"} 2024-12-01T17:10:21,531 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-01T17:10:21,532 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 41 msec 2024-12-01T17:10:21,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-01T17:10:21,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-01T17:10:21,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-01T17:10:21,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-01T17:10:21,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:21,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:21,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:21,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:21,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-01T17:10:21,579 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:21,579 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:21,579 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:21,580 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:21,580 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-01T17:10:21,580 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-01T17:10:21,585 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-01T17:10:21,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:21,588 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-01T17:10:21,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-01T17:10:21,612 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=819 (was 807) Potentially hanging thread: Thread-6802 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1761253976_1 at /127.0.0.1:32836 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:59322 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HFileArchiver-23
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:59826 [Waiting for operation #5]
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ApplicationMasterLauncher #14
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1396781562) connection to localhost/127.0.0.1:34615 from jenkins
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34615
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1396781562) connection to localhost/127.0.0.1:46051 from appattempt_1733072744857_0008_000001
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:32856 [Waiting for operation #4]
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: process reaper (pid 144958)
java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1761253976_1 at /127.0.0.1:59316 [Waiting for operation #2]
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=827 (was 795) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=507 (was 564), ProcessCount=17 (was 14) - ProcessCount LEAK? -, AvailableMemoryMB=3320 (was 3660)
2024-12-01T17:10:21,612 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500
2024-12-01T17:10:21,629 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=819, OpenFileDescriptor=827, MaxFileDescriptor=1048576, SystemLoadAverage=507, ProcessCount=17, AvailableMemoryMB=3320
2024-12-01T17:10:21,629 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500
2024-12-01T17:10:21,631 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-01T17:10:21,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum
2024-12-01T17:10:21,633 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION
2024-12-01T17:10:21,633 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-01T17:10:21,633 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213
2024-12-01T17:10:21,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213
2024-12-01T17:10:21,634 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213,
state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:10:21,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742279_1455 (size=404) 2024-12-01T17:10:21,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742279_1455 (size=404) 2024-12-01T17:10:21,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742279_1455 (size=404) 2024-12-01T17:10:21,642 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 963f442d9f0bffbf9506e762bd786bfe, NAME => 'testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:10:21,642 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => a163adb2946bdef62e62c0b7fbe9cf4f, NAME => 'testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:10:21,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742280_1456 (size=65) 2024-12-01T17:10:21,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742281_1457 (size=65) 2024-12-01T17:10:21,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742280_1456 (size=65) 2024-12-01T17:10:21,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742281_1457 (size=65) 2024-12-01T17:10:21,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742281_1457 (size=65) 2024-12-01T17:10:21,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742280_1456 (size=65) 2024-12-01T17:10:21,650 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] 
regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:10:21,650 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:10:21,650 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 963f442d9f0bffbf9506e762bd786bfe, disabling compactions & flushes 2024-12-01T17:10:21,650 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing a163adb2946bdef62e62c0b7fbe9cf4f, disabling compactions & flushes 2024-12-01T17:10:21,650 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:21,650 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 2024-12-01T17:10:21,650 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 2024-12-01T17:10:21,650 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:21,650 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. after waiting 0 ms 2024-12-01T17:10:21,650 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. after waiting 0 ms 2024-12-01T17:10:21,650 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 2024-12-01T17:10:21,650 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:21,650 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:21,650 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 
2024-12-01T17:10:21,650 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 963f442d9f0bffbf9506e762bd786bfe: Waiting for close lock at 1733073021650Disabling compacts and flushes for region at 1733073021650Disabling writes for close at 1733073021650Writing region close event to WAL at 1733073021650Closed at 1733073021650 2024-12-01T17:10:21,650 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for a163adb2946bdef62e62c0b7fbe9cf4f: Waiting for close lock at 1733073021650Disabling compacts and flushes for region at 1733073021650Disabling writes for close at 1733073021650Writing region close event to WAL at 1733073021650Closed at 1733073021650 2024-12-01T17:10:21,652 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:10:21,652 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733073021652"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733073021652"}]},"ts":"1733073021652"} 2024-12-01T17:10:21,652 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733073021652"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733073021652"}]},"ts":"1733073021652"} 2024-12-01T17:10:21,655 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
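[Editor's note] The entries above record the master-side CreateTableProcedure (pid=213) writing the filesystem layout for 'testtb-testExportWithChecksum' and adding its two regions to hbase:meta. As a reference point, the sketch below shows a minimal client-side call that would produce an equivalent create request; the ZooKeeper quorum value and the standalone main() wrapper are illustrative assumptions, not values taken from this log, and the real test wires up its own mini-cluster configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder quorum; the test harness supplies its own mini-cluster settings.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:56318");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
              .setRegionReplication(1)                      // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                        // VERSIONS => '1'
                  .setBlocksize(65536)                      // BLOCKSIZE => '65536'
                  .build());
      // Two regions split at '1', matching the STARTKEY/ENDKEY pair seen in the log.
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```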
2024-12-01T17:10:21,655 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:10:21,656 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073021655"}]},"ts":"1733073021655"} 2024-12-01T17:10:21,657 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-01T17:10:21,658 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:10:21,659 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:10:21,659 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:10:21,659 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:10:21,659 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:10:21,659 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:10:21,659 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:10:21,659 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:10:21,659 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:10:21,659 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:10:21,659 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:10:21,659 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a163adb2946bdef62e62c0b7fbe9cf4f, ASSIGN}] 2024-12-01T17:10:21,661 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a163adb2946bdef62e62c0b7fbe9cf4f, ASSIGN 2024-12-01T17:10:21,661 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, ASSIGN 2024-12-01T17:10:21,662 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a163adb2946bdef62e62c0b7fbe9cf4f, ASSIGN; state=OFFLINE, location=b87883a7882d,44753,1733072737741; forceNewPlan=false, retain=false 2024-12-01T17:10:21,662 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, ASSIGN; state=OFFLINE, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:10:21,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-01T17:10:21,813 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-01T17:10:21,814 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=963f442d9f0bffbf9506e762bd786bfe, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:10:21,814 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=a163adb2946bdef62e62c0b7fbe9cf4f, regionState=OPENING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:10:21,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, ASSIGN because future has completed 2024-12-01T17:10:21,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 963f442d9f0bffbf9506e762bd786bfe, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:10:21,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a163adb2946bdef62e62c0b7fbe9cf4f, ASSIGN because future has completed 2024-12-01T17:10:21,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:10:21,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-01T17:10:21,977 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:21,978 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => 963f442d9f0bffbf9506e762bd786bfe, NAME => 'testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:10:21,978 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. service=AccessControlService 2024-12-01T17:10:21,979 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 
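[Editor's note] The TransitRegionStateProcedure and OpenRegionProcedure entries above move both regions from OFFLINE to OPENING on their assigned region servers. The hedged sketch below shows how a client could observe the resulting assignments through the RegionLocator API; only the table name comes from this log, the rest is generic boilerplate.

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class ShowAssignments {
  // Prints region -> server assignments, the client-side view of the
  // "regionState=OPEN, regionLocation=..." updates written to hbase:meta above.
  static void printAssignments(Connection connection) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (RegionLocator locator = connection.getRegionLocator(table)) {
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        System.out.println(location.getRegion().getEncodedName()
            + " -> " + location.getServerName());
      }
    }
  }
}
```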
2024-12-01T17:10:21,979 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:10:21,979 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => a163adb2946bdef62e62c0b7fbe9cf4f, NAME => 'testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T17:10:21,979 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:21,979 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:10:21,980 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:21,980 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. service=AccessControlService 2024-12-01T17:10:21,980 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:21,980 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:10:21,980 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:21,980 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:10:21,980 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:21,980 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:21,982 INFO [StoreOpener-963f442d9f0bffbf9506e762bd786bfe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:21,982 INFO [StoreOpener-a163adb2946bdef62e62c0b7fbe9cf4f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:21,983 INFO [StoreOpener-a163adb2946bdef62e62c0b7fbe9cf4f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a163adb2946bdef62e62c0b7fbe9cf4f columnFamilyName cf 2024-12-01T17:10:21,983 INFO [StoreOpener-963f442d9f0bffbf9506e762bd786bfe-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 963f442d9f0bffbf9506e762bd786bfe columnFamilyName cf 2024-12-01T17:10:21,983 DEBUG [StoreOpener-a163adb2946bdef62e62c0b7fbe9cf4f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:10:21,983 DEBUG [StoreOpener-963f442d9f0bffbf9506e762bd786bfe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:10:21,984 INFO [StoreOpener-a163adb2946bdef62e62c0b7fbe9cf4f-1 {}] regionserver.HStore(327): Store=a163adb2946bdef62e62c0b7fbe9cf4f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:10:21,984 INFO [StoreOpener-963f442d9f0bffbf9506e762bd786bfe-1 {}] regionserver.HStore(327): Store=963f442d9f0bffbf9506e762bd786bfe/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:10:21,984 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:21,984 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:21,985 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:21,985 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:21,985 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:21,985 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:21,985 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:21,985 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:21,985 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:21,985 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:21,987 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 
{event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:21,987 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:21,988 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:10:21,989 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:10:21,989 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened 963f442d9f0bffbf9506e762bd786bfe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58808847, jitterRate=-0.12367989122867584}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:10:21,989 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened a163adb2946bdef62e62c0b7fbe9cf4f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61914724, jitterRate=-0.07739871740341187}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:10:21,989 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:21,989 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:21,990 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for 963f442d9f0bffbf9506e762bd786bfe: Running coprocessor pre-open hook at 1733073021980Writing region info on filesystem at 1733073021980Initializing all the Stores at 1733073021981 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733073021981Cleaning up temporary data from old regions at 1733073021985 (+4 ms)Running coprocessor post-open hooks at 1733073021989 (+4 ms)Region opened successfully at 1733073021990 (+1 ms) 2024-12-01T17:10:21,990 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for a163adb2946bdef62e62c0b7fbe9cf4f: Running coprocessor pre-open hook at 1733073021981Writing region info on filesystem at 1733073021981Initializing 
all the Stores at 1733073021981Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733073021981Cleaning up temporary data from old regions at 1733073021985 (+4 ms)Running coprocessor post-open hooks at 1733073021989 (+4 ms)Region opened successfully at 1733073021990 (+1 ms) 2024-12-01T17:10:21,991 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe., pid=216, masterSystemTime=1733073021972 2024-12-01T17:10:21,991 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f., pid=217, masterSystemTime=1733073021974 2024-12-01T17:10:21,992 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:21,992 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:21,993 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=963f442d9f0bffbf9506e762bd786bfe, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:10:21,993 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 2024-12-01T17:10:21,993 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 
2024-12-01T17:10:21,994 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=a163adb2946bdef62e62c0b7fbe9cf4f, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:10:21,995 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 963f442d9f0bffbf9506e762bd786bfe, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:10:21,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:10:21,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=214 2024-12-01T17:10:21,999 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=215 2024-12-01T17:10:21,999 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f, server=b87883a7882d,44753,1733072737741 in 178 msec 2024-12-01T17:10:21,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure 963f442d9f0bffbf9506e762bd786bfe, server=b87883a7882d,36521,1733072737896 in 178 msec 2024-12-01T17:10:22,000 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, ASSIGN in 340 msec 2024-12-01T17:10:22,001 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=215, resume processing ppid=213 2024-12-01T17:10:22,001 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a163adb2946bdef62e62c0b7fbe9cf4f, ASSIGN in 340 msec 2024-12-01T17:10:22,001 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:10:22,001 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073022001"}]},"ts":"1733073022001"} 2024-12-01T17:10:22,003 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-01T17:10:22,003 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:10:22,004 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-01T17:10:22,007 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 
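[Editor's note] The PermissionStorage entries above show the table owner 'jenkins' being recorded with RWXCA on the new table as part of CREATE_TABLE_POST_OPERATION. A comparable grant can also be issued explicitly through AccessControlClient; the sketch below is an illustration under the assumption of a cluster with the AccessController coprocessor loaded, as this log indicates, and is not the exact code path the test uses.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public final class GrantTablePermissions {
  // Grants the same RWXCA set that PermissionStorage records for 'jenkins' above.
  static void grantAll(Connection connection) throws Throwable {
    AccessControlClient.grant(connection,
        TableName.valueOf("testtb-testExportWithChecksum"),
        "jenkins",
        null, null,                       // all column families / qualifiers
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}
```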
2024-12-01T17:10:22,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:22,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:22,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:22,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:10:22,073 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:22,073 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:22,073 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:22,073 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:22,073 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:22,073 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:22,073 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:22,073 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-01T17:10:22,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 442 msec 2024-12-01T17:10:22,260 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-01T17:10:22,260 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-01T17:10:22,260 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-12-01T17:10:22,260 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:10:22,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32829 bytes) of info 2024-12-01T17:10:22,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-01T17:10:22,267 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:10:22,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-12-01T17:10:22,267 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-01T17:10:22,270 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-01T17:10:22,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733073022270 (current time:1733073022270). 
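[Editor's note] The request logged above asks the master for a FLUSH-type snapshot named emptySnaptb0-testExportWithChecksum with the default TTL. From the client side this corresponds to a single Admin call; the sketch below assumes an already-open Connection and is illustrative only.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class TakeEmptySnapshot {
  // Issues the same FLUSH snapshot request the master logs above as
  // "{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }".
  static void snapshot(Connection connection) throws Exception {
    try (Admin admin = connection.getAdmin()) {
      admin.snapshot("emptySnaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"),
          SnapshotType.FLUSH);
    }
  }
}
```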
2024-12-01T17:10:22,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:10:22,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-01T17:10:22,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:10:22,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@703b372f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:22,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:10:22,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:10:22,273 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:10:22,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:10:22,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:10:22,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@81a000e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:22,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:10:22,274 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:10:22,274 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:22,275 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49040, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:10:22,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@643b3adc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:22,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:10:22,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:10:22,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:22,277 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57838, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:22,279 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:10:22,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:10:22,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:22,279 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:10:22,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:22,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a17786, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:22,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:10:22,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:10:22,281 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:10:22,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:10:22,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:10:22,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9c7f854, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:22,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:10:22,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:10:22,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:22,282 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49050, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:10:22,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29a91047, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:22,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:10:22,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:10:22,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:22,286 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57842, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:22,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:10:22,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:22,289 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39600, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:22,290 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:10:22,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:10:22,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:22,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:22,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-01T17:10:22,290 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:10:22,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:10:22,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-01T17:10:22,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-01T17:10:22,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-01T17:10:22,293 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:10:22,294 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:10:22,296 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:10:22,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742282_1458 (size=161) 2024-12-01T17:10:22,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742282_1458 (size=161) 2024-12-01T17:10:22,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742282_1458 (size=161) 2024-12-01T17:10:22,302 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ 
ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:10:22,302 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 963f442d9f0bffbf9506e762bd786bfe}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f}] 2024-12-01T17:10:22,303 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:22,303 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:22,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-01T17:10:22,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-12-01T17:10:22,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-12-01T17:10:22,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:22,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 2024-12-01T17:10:22,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for 963f442d9f0bffbf9506e762bd786bfe: 2024-12-01T17:10:22,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for a163adb2946bdef62e62c0b7fbe9cf4f: 2024-12-01T17:10:22,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. for emptySnaptb0-testExportWithChecksum completed. 2024-12-01T17:10:22,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. for emptySnaptb0-testExportWithChecksum completed. 2024-12-01T17:10:22,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-01T17:10:22,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-01T17:10:22,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:10:22,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:10:22,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:10:22,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:10:22,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742284_1460 (size=68) 2024-12-01T17:10:22,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742284_1460 (size=68) 2024-12-01T17:10:22,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742284_1460 (size=68) 2024-12-01T17:10:22,461 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 
2024-12-01T17:10:22,461 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-12-01T17:10:22,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-12-01T17:10:22,462 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:22,462 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:22,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 963f442d9f0bffbf9506e762bd786bfe in 161 msec 2024-12-01T17:10:22,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742283_1459 (size=68) 2024-12-01T17:10:22,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742283_1459 (size=68) 2024-12-01T17:10:22,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742283_1459 (size=68) 2024-12-01T17:10:22,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 
2024-12-01T17:10:22,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-12-01T17:10:22,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-12-01T17:10:22,469 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:22,470 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:22,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=220, resume processing ppid=218 2024-12-01T17:10:22,472 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:10:22,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f in 168 msec 2024-12-01T17:10:22,473 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:10:22,473 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:10:22,473 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-01T17:10:22,474 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-01T17:10:22,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742285_1461 (size=543) 2024-12-01T17:10:22,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742285_1461 (size=543) 2024-12-01T17:10:22,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742285_1461 (size=543) 2024-12-01T17:10:22,482 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:10:22,487 INFO 
[PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:10:22,487 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-01T17:10:22,489 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:10:22,489 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-01T17:10:22,490 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 198 msec 2024-12-01T17:10:22,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-01T17:10:22,610 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-01T17:10:22,614 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='02f7d19a45a6621485de92755a89dd6e5', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:10:22,614 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='171e4efe79f014e28d965646aff1f5c50', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:10:22,615 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='20ffce5f53475237771b3ea5e46221d58', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:10:22,616 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='31bc134e754bf3cfbaa9caccf872f20e7', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:10:22,617 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='42503f899d8958c50573b53e3bfd70cd3', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:10:22,619 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36521 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:10:22,619 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44753 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:10:22,620 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-01T17:10:22,623 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-01T17:10:22,623 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:22,623 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:10:22,624 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-01T17:10:22,629 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-01T17:10:22,633 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-01T17:10:22,635 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-01T17:10:22,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733073022636 (current time:1733073022636). 
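
[editor's note] The entries just above -- test writes issued "with WAL disabled" followed by the master receiving a snapshot request for { ss=snaptb0-testExportWithChecksum type=FLUSH ttl=0 } -- correspond to ordinary HBase client calls. A minimal, illustrative sketch (table, snapshot, family and qualifier names taken from the log; the row key and value are invented, and this is not the test's actual code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // A SKIP_WAL put is what produces the
          // "writing data to region ... with WAL disabled" warnings above.
          try (Table table = conn.getTable(tn)) {
            Put put = new Put(Bytes.toBytes("row-0")); // illustrative row key
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
            put.setDurability(Durability.SKIP_WAL);
            table.put(put);
          }
          // The FLUSH-type snapshot request that shows up as
          // "snapshot request for:{ ss=snaptb0-... type=FLUSH ttl=0 }" on the master.
          try (Admin admin = conn.getAdmin()) {
            admin.snapshot(new SnapshotDescription(
                "snaptb0-testExportWithChecksum", tn, SnapshotType.FLUSH));
          }
        }
      }
    }

The blocking admin.snapshot(...) call waits on the master-side SnapshotProcedure, which is why the handler keeps logging "Checking to see if procedure is done pid=..." until the procedure finishes.
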
2024-12-01T17:10:22,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:10:22,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-01T17:10:22,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:10:22,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ad6fef8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:22,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:10:22,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:10:22,637 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:10:22,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:10:22,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:10:22,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@147573ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:22,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:10:22,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:10:22,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:22,638 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49070, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:10:22,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4541434c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:22,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:10:22,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:10:22,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:22,641 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57848, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:22,641 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:10:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:10:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:22,642 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:10:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ed831b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:10:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:10:22,643 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:10:22,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:10:22,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:10:22,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cbfff08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:22,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:10:22,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:10:22,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:22,644 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49092, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:10:22,645 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5127f0c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:10:22,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:10:22,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:10:22,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:22,647 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57854, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
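
[editor's note] The PermissionStorage.getPermissions calls visible in the call stacks around here read the table's ACL entry from hbase:acl (logged as "Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA]") so it can be attached to the snapshot description. From outside the master, comparable information can be inspected with AccessControlClient; a hedged sketch, not part of the test:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTablePermissions {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Lists the ACL entries kept in hbase:acl for the table,
          // e.g. the "jenkins: RWXCA" grant seen in the log.
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "testtb-testExportWithChecksum");
          perms.forEach(System.out::println);
        }
      }
    }
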
2024-12-01T17:10:22,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:10:22,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:10:22,649 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39612, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:10:22,650 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:10:22,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:10:22,650 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:22,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:10:22,650 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:10:22,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-01T17:10:22,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:10:22,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-01T17:10:22,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-01T17:10:22,652 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:10:22,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-01T17:10:22,653 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:10:22,655 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:10:22,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742286_1462 (size=156) 2024-12-01T17:10:22,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742286_1462 (size=156) 2024-12-01T17:10:22,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742286_1462 (size=156) 2024-12-01T17:10:22,660 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:10:22,661 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 963f442d9f0bffbf9506e762bd786bfe}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f}] 2024-12-01T17:10:22,661 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:22,661 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:22,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-01T17:10:22,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-12-01T17:10:22,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-12-01T17:10:22,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:22,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 
2024-12-01T17:10:22,813 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing 963f442d9f0bffbf9506e762bd786bfe 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-01T17:10:22,813 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing a163adb2946bdef62e62c0b7fbe9cf4f 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-01T17:10:22,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/.tmp/cf/db40289d51744829afdf3b40fecdade7 is 71, key is 01cde0304abfb1458ad6346a91c68ed3/cf:q/1733073022618/Put/seqid=0 2024-12-01T17:10:22,832 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:10:22,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/.tmp/cf/3498e98cef714e75b8e1c87b00c8ee0a is 71, key is 1143ffdd2091c2cc7244a062f2bf40ae/cf:q/1733073022619/Put/seqid=0 2024-12-01T17:10:22,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742287_1463 (size=5354) 2024-12-01T17:10:22,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742287_1463 (size=5354) 2024-12-01T17:10:22,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742287_1463 (size=5354) 2024-12-01T17:10:22,836 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/.tmp/cf/db40289d51744829afdf3b40fecdade7 2024-12-01T17:10:22,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/.tmp/cf/db40289d51744829afdf3b40fecdade7 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7 2024-12-01T17:10:22,846 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7, entries=4, sequenceid=6, filesize=5.2 K 
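
[editor's note] Because these are FLUSH-type snapshots, each SnapshotRegionCallable first flushes the region's memstore so the snapshot can reference on-disk hfiles (the DefaultStoreFlusher and "Committing ... .tmp/cf/..." entries above). That flush happens inside the region server here, but the same effect can be requested explicitly from a client; an illustrative sketch, not the code path used in this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Forces memstores to be written out as hfiles under <region>/cf/,
          // analogous to the flush the snapshot procedure performs above.
          admin.flush(TableName.valueOf("testtb-testExportWithChecksum"));
        }
      }
    }
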
2024-12-01T17:10:22,846 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 963f442d9f0bffbf9506e762bd786bfe in 33ms, sequenceid=6, compaction requested=false 2024-12-01T17:10:22,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-01T17:10:22,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for 963f442d9f0bffbf9506e762bd786bfe: 2024-12-01T17:10:22,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. for snaptb0-testExportWithChecksum completed. 2024-12-01T17:10:22,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-01T17:10:22,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:10:22,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7] hfiles 2024-12-01T17:10:22,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7 for snapshot=snaptb0-testExportWithChecksum 2024-12-01T17:10:22,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742288_1464 (size=8256) 2024-12-01T17:10:22,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742288_1464 (size=8256) 2024-12-01T17:10:22,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742288_1464 (size=8256) 2024-12-01T17:10:22,854 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/.tmp/cf/3498e98cef714e75b8e1c87b00c8ee0a 2024-12-01T17:10:22,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742289_1465 (size=107) 
2024-12-01T17:10:22,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742289_1465 (size=107) 2024-12-01T17:10:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742289_1465 (size=107) 2024-12-01T17:10:22,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:22,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-12-01T17:10:22,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/.tmp/cf/3498e98cef714e75b8e1c87b00c8ee0a as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf/3498e98cef714e75b8e1c87b00c8ee0a 2024-12-01T17:10:22,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-12-01T17:10:22,860 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:22,860 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:22,863 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 963f442d9f0bffbf9506e762bd786bfe in 201 msec 2024-12-01T17:10:22,864 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf/3498e98cef714e75b8e1c87b00c8ee0a, entries=46, sequenceid=6, filesize=8.1 K 2024-12-01T17:10:22,865 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for a163adb2946bdef62e62c0b7fbe9cf4f in 52ms, sequenceid=6, compaction requested=false 2024-12-01T17:10:22,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for a163adb2946bdef62e62c0b7fbe9cf4f: 2024-12-01T17:10:22,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. for snaptb0-testExportWithChecksum completed. 
2024-12-01T17:10:22,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-01T17:10:22,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:10:22,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf/3498e98cef714e75b8e1c87b00c8ee0a] hfiles 2024-12-01T17:10:22,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf/3498e98cef714e75b8e1c87b00c8ee0a for snapshot=snaptb0-testExportWithChecksum 2024-12-01T17:10:22,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742290_1466 (size=107) 2024-12-01T17:10:22,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742290_1466 (size=107) 2024-12-01T17:10:22,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742290_1466 (size=107) 2024-12-01T17:10:22,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 
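
[editor's note] Once snaptb0-testExportWithChecksum completes (just below), the test exports it to a local file:// destination. Driving that kind of export outside the test normally goes through the ExportSnapshot tool; a hedged sketch with an illustrative destination path, not the test's own invocation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportToLocalFs {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent to:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testExportWithChecksum -copy-to file:///tmp/local-export
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export" // illustrative path
        });
        System.exit(rc);
      }
    }

The copy itself runs as a MapReduce job, which is why the log further below shows TableMapReduceUtil resolving a jar for each dependency class ("For class ..., using jar ...") before the job is submitted.
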
2024-12-01T17:10:22,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-12-01T17:10:22,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-12-01T17:10:22,874 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:22,874 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:10:22,877 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=223, resume processing ppid=221 2024-12-01T17:10:22,877 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:10:22,877 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f in 215 msec 2024-12-01T17:10:22,877 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:10:22,878 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:10:22,878 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-01T17:10:22,878 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-01T17:10:22,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742291_1467 (size=621) 2024-12-01T17:10:22,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742291_1467 (size=621) 2024-12-01T17:10:22,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742291_1467 (size=621) 2024-12-01T17:10:22,888 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:10:22,893 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:10:22,893 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-01T17:10:22,894 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:10:22,894 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-01T17:10:22,895 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 243 msec 2024-12-01T17:10:22,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-01T17:10:22,970 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-01T17:10:22,971 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733073022971 2024-12-01T17:10:22,971 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733073022971, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733073022971, srcFsUri=hdfs://localhost:44451, srcDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:10:22,994 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44451, inputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:10:22,994 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@767e74a0, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733073022971, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733073022971/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-01T17:10:22,995 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-01T17:10:22,998 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733073022971/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-01T17:10:23,016 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:23,016 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:23,016 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:23,873 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-10826156224361785879.jar 2024-12-01T17:10:23,874 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:23,874 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:23,925 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-5325587832830845515.jar 2024-12-01T17:10:23,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:23,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:23,926 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:23,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:23,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:23,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:23,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-01T17:10:23,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-01T17:10:23,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-01T17:10:23,928 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-01T17:10:23,928 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-01T17:10:23,928 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-01T17:10:23,928 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-01T17:10:23,928 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-01T17:10:23,928 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-01T17:10:23,929 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-01T17:10:23,929 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-01T17:10:23,929 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:10:23,929 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:10:23,929 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:10:23,929 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:10:23,930 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:10:23,930 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:10:23,930 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:10:23,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742292_1468 (size=440956) 2024-12-01T17:10:23,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to 
blk_1073742292_1468 (size=440956) 2024-12-01T17:10:23,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742292_1468 (size=440956) 2024-12-01T17:10:23,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742293_1469 (size=24020) 2024-12-01T17:10:23,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742293_1469 (size=24020) 2024-12-01T17:10:23,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742293_1469 (size=24020) 2024-12-01T17:10:23,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742294_1470 (size=77755) 2024-12-01T17:10:23,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742294_1470 (size=77755) 2024-12-01T17:10:23,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742294_1470 (size=77755) 2024-12-01T17:10:23,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742295_1471 (size=131360) 2024-12-01T17:10:23,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742295_1471 (size=131360) 2024-12-01T17:10:23,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742295_1471 (size=131360) 2024-12-01T17:10:24,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742296_1472 (size=111793) 2024-12-01T17:10:24,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742296_1472 (size=111793) 2024-12-01T17:10:24,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742296_1472 (size=111793) 2024-12-01T17:10:24,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742297_1473 (size=1832290) 2024-12-01T17:10:24,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742297_1473 (size=1832290) 2024-12-01T17:10:24,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742297_1473 (size=1832290) 2024-12-01T17:10:24,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742298_1474 (size=8360005) 2024-12-01T17:10:24,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742298_1474 (size=8360005) 2024-12-01T17:10:24,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742298_1474 (size=8360005) 2024-12-01T17:10:24,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is 
added to blk_1073742299_1475 (size=6424742) 2024-12-01T17:10:24,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742299_1475 (size=6424742) 2024-12-01T17:10:24,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742299_1475 (size=6424742) 2024-12-01T17:10:24,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742300_1476 (size=503880) 2024-12-01T17:10:24,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742300_1476 (size=503880) 2024-12-01T17:10:24,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742300_1476 (size=503880) 2024-12-01T17:10:24,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742301_1477 (size=322274) 2024-12-01T17:10:24,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742301_1477 (size=322274) 2024-12-01T17:10:24,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742301_1477 (size=322274) 2024-12-01T17:10:24,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742302_1478 (size=20406) 2024-12-01T17:10:24,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742302_1478 (size=20406) 2024-12-01T17:10:24,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742302_1478 (size=20406) 2024-12-01T17:10:24,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742303_1479 (size=45609) 2024-12-01T17:10:24,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742303_1479 (size=45609) 2024-12-01T17:10:24,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742303_1479 (size=45609) 2024-12-01T17:10:24,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742304_1480 (size=136454) 2024-12-01T17:10:24,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742304_1480 (size=136454) 2024-12-01T17:10:24,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742304_1480 (size=136454) 2024-12-01T17:10:24,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742305_1481 (size=1597136) 2024-12-01T17:10:24,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742305_1481 (size=1597136) 2024-12-01T17:10:24,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36379 is added to blk_1073742305_1481 (size=1597136) 2024-12-01T17:10:24,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742306_1482 (size=30873) 2024-12-01T17:10:24,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742306_1482 (size=30873) 2024-12-01T17:10:24,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742306_1482 (size=30873) 2024-12-01T17:10:24,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742307_1483 (size=29229) 2024-12-01T17:10:24,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742307_1483 (size=29229) 2024-12-01T17:10:24,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742307_1483 (size=29229) 2024-12-01T17:10:24,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742308_1484 (size=903864) 2024-12-01T17:10:24,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742308_1484 (size=903864) 2024-12-01T17:10:24,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742308_1484 (size=903864) 2024-12-01T17:10:24,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742309_1485 (size=5175431) 2024-12-01T17:10:24,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742309_1485 (size=5175431) 2024-12-01T17:10:24,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742309_1485 (size=5175431) 2024-12-01T17:10:24,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742310_1486 (size=232881) 2024-12-01T17:10:24,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742310_1486 (size=232881) 2024-12-01T17:10:24,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742310_1486 (size=232881) 2024-12-01T17:10:24,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742311_1487 (size=1323991) 2024-12-01T17:10:24,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742311_1487 (size=1323991) 2024-12-01T17:10:24,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742311_1487 (size=1323991) 2024-12-01T17:10:24,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742312_1488 (size=4695811) 2024-12-01T17:10:24,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36379 is added to blk_1073742312_1488 (size=4695811) 2024-12-01T17:10:24,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742312_1488 (size=4695811) 2024-12-01T17:10:24,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742313_1489 (size=1877034) 2024-12-01T17:10:24,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742313_1489 (size=1877034) 2024-12-01T17:10:24,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742313_1489 (size=1877034) 2024-12-01T17:10:24,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742314_1490 (size=217555) 2024-12-01T17:10:24,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742314_1490 (size=217555) 2024-12-01T17:10:24,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742314_1490 (size=217555) 2024-12-01T17:10:24,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742315_1491 (size=4188619) 2024-12-01T17:10:24,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742315_1491 (size=4188619) 2024-12-01T17:10:24,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742315_1491 (size=4188619) 2024-12-01T17:10:24,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742316_1492 (size=127628) 2024-12-01T17:10:24,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742316_1492 (size=127628) 2024-12-01T17:10:24,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742316_1492 (size=127628) 2024-12-01T17:10:24,201 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
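The JobResourceUploader warning just above is emitted when a MapReduce job is submitted without a job jar; in this test the classes are already on the MiniMRCluster classpath, so it is benign, but a real client would normally set the jar from its driver class. A minimal sketch of that call is below (the class and job names are illustrative, not taken from this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "example-job");
    // Setting the jar by class avoids the "No job jar file set. User classes may
    // not be found." warning: Hadoop locates the jar containing this class and
    // ships it with the job submission.
    job.setJarByClass(JobJarExample.class);
    // ... configure mapper/reducer, input/output paths, then submit ...
  }
}
```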
2024-12-01T17:10:24,202 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-01T17:10:24,204 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-01T17:10:24,204 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-01T17:10:24,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742317_1493 (size=441) 2024-12-01T17:10:24,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742317_1493 (size=441) 2024-12-01T17:10:24,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742317_1493 (size=441) 2024-12-01T17:10:24,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742318_1494 (size=21) 2024-12-01T17:10:24,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742318_1494 (size=21) 2024-12-01T17:10:24,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742318_1494 (size=21) 2024-12-01T17:10:24,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742319_1495 (size=304129) 2024-12-01T17:10:24,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742319_1495 (size=304129) 2024-12-01T17:10:24,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742319_1495 (size=304129) 2024-12-01T17:10:26,143 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-01T17:10:26,143 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-01T17:10:26,156 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0008_000001 (auth:SIMPLE) from 127.0.0.1:38690 2024-12-01T17:10:26,162 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0008/container_1733072744857_0008_01_000001/launch_container.sh] 2024-12-01T17:10:26,162 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0008/container_1733072744857_0008_01_000001/container_tokens] 2024-12-01T17:10:26,162 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0008/container_1733072744857_0008_01_000001/sysfs] 2024-12-01T17:10:26,827 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:58390 2024-12-01T17:10:27,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-01T17:10:27,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-01T17:10:27,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-01T17:10:31,467 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:43894 2024-12-01T17:10:31,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742320_1496 (size=349827) 2024-12-01T17:10:31,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742320_1496 (size=349827) 2024-12-01T17:10:31,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742320_1496 (size=349827) 2024-12-01T17:10:32,833 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:10:33,671 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:52676 2024-12-01T17:10:33,671 INFO [Socket Reader #1 for port 0 {}] 
ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:36062 2024-12-01T17:10:35,859 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T17:10:37,131 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000002/launch_container.sh] 2024-12-01T17:10:37,131 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000002/container_tokens] 2024-12-01T17:10:37,131 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf/3498e98cef714e75b8e1c87b00c8ee0a and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733073022971/archive/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf/3498e98cef714e75b8e1c87b00c8ee0a. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-12-01T17:10:37,533 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_0/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000003/launch_container.sh]
2024-12-01T17:10:37,533 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_0/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000003/container_tokens]
2024-12-01T17:10:37,533 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_0/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000003/sysfs]
2024-12-01T17:10:37,849 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0
2024-12-01T17:10:37,928 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0
2024-12-01T17:10:37,995 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=919.14 KB, freeSize=879.10 MB, max=880 MB, blockCount=3, accesses=5, hits=2, hitRatio=40.00%, , cachingAccesses=5, cachingHits=2, cachingHitsRatio=40.00%, evictions=29, evicted=0, evictedPerRun=0.0
2024-12-01T17:10:38,101 DEBUG [master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=22, reuseRatio=68.75%
2024-12-01T17:10:38,101 DEBUG [master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics {}]
regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733073022971/archive/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-12-01T17:10:38,561 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:36066
2024-12-01T17:10:38,562 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:52690
2024-12-01T17:10:40,103 INFO [regionserver/b87883a7882d:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B
2024-12-01T17:10:40,104 INFO [regionserver/b87883a7882d:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B
2024-12-01T17:10:40,173 INFO [regionserver/b87883a7882d:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B
2024-12-01T17:10:41,093 INFO [regionserver/b87883a7882d:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 4621 ms
2024-12-01T17:10:43,077 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ced0bf6724c9afd6d078a6aaf8bb47a9 changed from -1.0 to 0.0, refreshing cache
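The "Checksum mismatch" errors above come from ExportSnapshot's post-copy verification: the source files live on HDFS while the export target is a local filesystem, and, as the message itself notes, the two filesystems expose different checksum types, so even a byte-identical copy can fail the comparison. The message names two remedies. A minimal sketch of the first one, setting the file-level CRC mode on the configuration used for the export, is below; the property name and value come directly from the error text, while the surrounding driver code is an assumed illustration rather than the test's actual setup.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ChecksumModeExample {
  public static void main(String[] args) {
    // Sketch only: ask HDFS for a block-size-independent, file-level CRC
    // (COMPOSITE_CRC), which the error message recommends when block sizes
    // or filesystems differ between source and target.
    Configuration conf = HBaseConfiguration.create();
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    // This Configuration would then be handed to the export job
    // (see the re-run sketch at the end of this excerpt).
  }
}
```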
2024-12-01T17:10:43,077 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 963f442d9f0bffbf9506e762bd786bfe changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:10:43,077 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 13964bd13cf62ff2c1f3f282d989070a changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:10:43,077 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region a163adb2946bdef62e62c0b7fbe9cf4f changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:10:43,080 DEBUG [master/b87883a7882d:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-12-01T17:10:43,080 INFO [master/b87883a7882d:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-01T17:10:43,080 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 2024-12-01T17:10:43,081 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:10:43,082 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 3 regions 2024-12-01T17:10:43,082 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 1 regions 2024-12-01T17:10:43,082 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 2 regions 2024-12-01T17:10:43,082 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:10:43,082 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:10:43,082 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:10:43,082 INFO [master/b87883a7882d:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:10:43,082 INFO [master/b87883a7882d:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:10:43,082 INFO [master/b87883a7882d:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:10:43,082 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-12-01T17:10:43,085 INFO [master/b87883a7882d:0.Chore.1 {}] balancer.StochasticLoadBalancer(403): Cluster wide - Calculating plan. may take up to 30000ms to complete. 
2024-12-01T17:10:43,086 INFO [master/b87883a7882d:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.25184137451939254, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.2886751345948129, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8526829262536787, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8533464809192823, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=14400 2024-12-01T17:10:43,296 INFO [master/b87883a7882d:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. Computation took 213 ms to try 14400 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.25184137451939254 to a new imbalance of 0.01597498138802549. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.16666666666666666, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8526829262536787, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8533464809192823, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-01T17:10:43,300 INFO [master/b87883a7882d:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 1, the balance interval is 300000 ms, and the max number regions in transition is 6 2024-12-01T17:10:43,301 INFO [master/b87883a7882d:0.Chore.1 {}] master.HMaster(2172): balance hri=963f442d9f0bffbf9506e762bd786bfe, source=b87883a7882d,36521,1733072737896, destination=b87883a7882d,41375,1733072737967 2024-12-01T17:10:43,302 DEBUG [master/b87883a7882d:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, REOPEN/MOVE 2024-12-01T17:10:43,303 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, REOPEN/MOVE 2024-12-01T17:10:43,305 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=963f442d9f0bffbf9506e762bd786bfe, regionState=CLOSING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:10:43,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): 
Going to wake up procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, REOPEN/MOVE because future has completed 2024-12-01T17:10:43,309 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:10:43,309 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure 963f442d9f0bffbf9506e762bd786bfe, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:10:43,463 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(122): Close 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,463 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:10:43,464 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1722): Closing 963f442d9f0bffbf9506e762bd786bfe, disabling compactions & flushes 2024-12-01T17:10:43,464 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:43,464 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:43,464 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. after waiting 0 ms 2024-12-01T17:10:43,464 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:43,475 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:10:43,476 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:10:43,476 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 
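The StochasticLoadBalancer entries a few lines above report an initial weighted average imbalance of 0.25184137451939254 and, after the single proposed move, 0.01597498138802549. Those figures are consistent with a multiplier-weighted average of the per-function imbalances, i.e. sum(multiplier x imbalance) / sum(multipliers). The sketch below simply reproduces the two numbers from the values printed in the log; treating the denominator as the sum of all listed multipliers (607) is an inference from the numbers, not something the log states.

```java
public class BalancerCostCheck {
  public static void main(String[] args) {
    // Multipliers printed in the log: RegionCountSkew=500, Move=7, ServerLocality=25,
    // RackLocality=15, TableSkew=35, ReadRequest=5, CPRequest=5, WriteRequest=5,
    // MemStoreSize=5, StoreFile=5  -> total 607 (assumed weighting scheme).
    double totalMultiplier = 500 + 7 + 25 + 15 + 35 + 5 + 5 + 5 + 5 + 5;

    // Initial state: only RegionCountSkew, ReadRequest and WriteRequest are non-zero.
    double initial = (500 * 0.2886751345948129
                    + 5 * 0.8526829262536787
                    + 5 * 0.8533464809192823) / totalMultiplier;

    // After the computed move: RegionCountSkew drops to 0, MoveCost becomes 1/6.
    double afterMove = (7 * 0.16666666666666666
                      + 5 * 0.8526829262536787
                      + 5 * 0.8533464809192823) / totalMultiplier;

    System.out.println(initial);   // ~0.251841..., matching the "initial weighted average imbalance"
    System.out.println(afterMove); // ~0.015974..., matching the "new imbalance" after the move
  }
}
```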
2024-12-01T17:10:43,476 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1676): Region close journal for 963f442d9f0bffbf9506e762bd786bfe: Waiting for close lock at 1733073043464Running coprocessor pre-close hooks at 1733073043464Disabling compacts and flushes for region at 1733073043464Disabling writes for close at 1733073043464Writing region close event to WAL at 1733073043467 (+3 ms)Running coprocessor post-close hooks at 1733073043476 (+9 ms)Closed at 1733073043476 2024-12-01T17:10:43,477 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegionServer(3302): Adding 963f442d9f0bffbf9506e762bd786bfe move to b87883a7882d,41375,1733072737967 record at close sequenceid=6 2024-12-01T17:10:43,479 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(157): Closed 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,480 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=963f442d9f0bffbf9506e762bd786bfe, regionState=CLOSED 2024-12-01T17:10:43,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure 963f442d9f0bffbf9506e762bd786bfe, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:10:43,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-12-01T17:10:43,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseRegionProcedure 963f442d9f0bffbf9506e762bd786bfe, server=b87883a7882d,36521,1733072737896 in 177 msec 2024-12-01T17:10:43,492 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, REOPEN/MOVE; state=CLOSED, location=b87883a7882d,41375,1733072737967; forceNewPlan=false, retain=false 2024-12-01T17:10:43,638 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000005/launch_container.sh] 2024-12-01T17:10:43,638 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000005/container_tokens] 2024-12-01T17:10:43,638 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000005/sysfs] 2024-12-01T17:10:43,642 WARN 
[ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000004/launch_container.sh] 2024-12-01T17:10:43,642 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000004/container_tokens] 2024-12-01T17:10:43,643 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000004/sysfs] 2024-12-01T17:10:43,643 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-01T17:10:43,643 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=963f442d9f0bffbf9506e762bd786bfe, regionState=OPENING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:10:43,645 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, REOPEN/MOVE because future has completed 2024-12-01T17:10:43,645 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure 963f442d9f0bffbf9506e762bd786bfe, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:10:43,801 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:43,801 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7752): Opening region: {ENCODED => 963f442d9f0bffbf9506e762bd786bfe, NAME => 'testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:10:43,802 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. service=AccessControlService 2024-12-01T17:10:43,802 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:10:43,802 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,802 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:10:43,802 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7794): checking encryption for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,802 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7797): checking classloading for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,804 INFO [StoreOpener-963f442d9f0bffbf9506e762bd786bfe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,805 INFO [StoreOpener-963f442d9f0bffbf9506e762bd786bfe-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 963f442d9f0bffbf9506e762bd786bfe columnFamilyName cf 2024-12-01T17:10:43,805 DEBUG [StoreOpener-963f442d9f0bffbf9506e762bd786bfe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:10:43,813 DEBUG [StoreOpener-963f442d9f0bffbf9506e762bd786bfe-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7 2024-12-01T17:10:43,813 INFO [StoreOpener-963f442d9f0bffbf9506e762bd786bfe-1 {}] regionserver.HStore(327): Store=963f442d9f0bffbf9506e762bd786bfe/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:10:43,813 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1038): replaying wal for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,814 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,815 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,815 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1048): stopping wal replay for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,815 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1060): Cleaning up temporary data for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,817 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1093): writing seq id for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,818 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1114): Opened 963f442d9f0bffbf9506e762bd786bfe; next sequenceid=10; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58925221, jitterRate=-0.12194578349590302}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:10:43,818 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:10:43,818 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1006): Region open journal for 963f442d9f0bffbf9506e762bd786bfe: Running coprocessor pre-open hook at 1733073043802Writing region info on filesystem at 1733073043802Initializing all the Stores at 1733073043803 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733073043803Cleaning up temporary data from old regions at 1733073043815 (+12 ms)Running coprocessor post-open hooks at 1733073043818 (+3 ms)Region opened successfully at 1733073043818 2024-12-01T17:10:43,819 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe., pid=226, masterSystemTime=1733073043797 2024-12-01T17:10:43,821 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:10:43,822 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 
2024-12-01T17:10:43,824 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=963f442d9f0bffbf9506e762bd786bfe, regionState=OPEN, openSeqNum=10, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:10:43,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure 963f442d9f0bffbf9506e762bd786bfe, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:10:43,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-12-01T17:10:43,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; OpenRegionProcedure 963f442d9f0bffbf9506e762bd786bfe, server=b87883a7882d,41375,1733072737967 in 182 msec 2024-12-01T17:10:43,832 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, REOPEN/MOVE in 528 msec 2024-12-01T17:10:43,904 DEBUG [master/b87883a7882d:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-01T17:10:43,911 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-01T17:10:43,911 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-01T17:10:43,919 DEBUG [master/b87883a7882d:0.Chore.1 {}] janitor.CatalogJanitor(180): Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733073022971/archive/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf/3498e98cef714e75b8e1c87b00c8ee0a and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733073022971/archive/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf/3498e98cef714e75b8e1c87b00c8ee0a. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-01T17:10:44,572 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:43708 2024-12-01T17:10:44,573 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:46920 2024-12-01T17:10:45,281 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ced0bf6724c9afd6d078a6aaf8bb47a9, had cached 0 bytes from a total of 5216 2024-12-01T17:10:45,281 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 13964bd13cf62ff2c1f3f282d989070a, had cached 0 bytes from a total of 8394 2024-12-01T17:10:45,715 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
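The checksum-mismatch errors above already name the two available remedies: compare file-level CRCs via dfs.checksum.combine.mode=COMPOSITE_CRC (so checksums from the hdfs:// source and the file:// destination become comparable), or pass -no-checksum-verify to skip verification at the cost of not catching corruption during the copy. Below is a minimal sketch of a re-run with those options, assuming the standard Tool-style ExportSnapshot invocation shown in the stack traces (ToolRunner driving the tool) and the flag spellings printed in the error message and HBase reference guide; the class name and local destination path are illustrative, not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    // Hypothetical driver class; not part of this test run.
    public class ExportSnapshotChecksumSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Remedy 1: file-level CRC comparison, usable when source and target
        // filesystems (or block sizes) differ, as in the hdfs:// -> file:// copy above.
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum", // snapshot used by this test
            "-copy-to", "file:///tmp/local-export"         // illustrative local destination
            // Remedy 2 (alternative): add "-no-checksum-verify" to skip the check
            // entirely; this can mask data corruption during file transfer.
        });
        System.exit(rc);
      }
    }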
2024-12-01T17:10:45,715 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=78.71 KB heapSize=124.51 KB 2024-12-01T17:10:45,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/info/1da98f9b4e9944cbb6b4a763d2e7d9d8 is 181, key is testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f./info:regioninfo/1733073021994/Put/seqid=0 2024-12-01T17:10:45,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742322_1498 (size=18209) 2024-12-01T17:10:45,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742322_1498 (size=18209) 2024-12-01T17:10:45,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742322_1498 (size=18209) 2024-12-01T17:10:45,825 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.54 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/info/1da98f9b4e9944cbb6b4a763d2e7d9d8 2024-12-01T17:10:45,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/ns/c25b291d8e014fa9994bef01efc797fc is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40./ns:/1733072998098/DeleteFamily/seqid=0 2024-12-01T17:10:45,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742323_1499 (size=7924) 2024-12-01T17:10:45,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742323_1499 (size=7924) 2024-12-01T17:10:45,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742323_1499 (size=7924) 2024-12-01T17:10:45,871 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.37 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/ns/c25b291d8e014fa9994bef01efc797fc 2024-12-01T17:10:45,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/rep_barrier/4c3df96b8edb4b8eb3e292340e678e74 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40./rep_barrier:/1733072998098/DeleteFamily/seqid=0 2024-12-01T17:10:45,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742324_1500 (size=8195) 2024-12-01T17:10:45,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742324_1500 (size=8195) 2024-12-01T17:10:45,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36291 is added to blk_1073742324_1500 (size=8195) 2024-12-01T17:10:45,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.49 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/rep_barrier/4c3df96b8edb4b8eb3e292340e678e74 2024-12-01T17:10:46,046 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/table/2f064a8493304889af9e252d94455ee2 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733072977366.2c5ba4cec0c1a2c8690fe97339265e40./table:/1733072998098/DeleteFamily/seqid=0 2024-12-01T17:10:46,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742325_1501 (size=9051) 2024-12-01T17:10:46,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742325_1501 (size=9051) 2024-12-01T17:10:46,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742325_1501 (size=9051) 2024-12-01T17:10:46,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/table/2f064a8493304889af9e252d94455ee2 2024-12-01T17:10:46,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/info/1da98f9b4e9944cbb6b4a763d2e7d9d8 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/info/1da98f9b4e9944cbb6b4a763d2e7d9d8 2024-12-01T17:10:46,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/info/1da98f9b4e9944cbb6b4a763d2e7d9d8, entries=101, sequenceid=215, filesize=17.8 K 2024-12-01T17:10:46,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/ns/c25b291d8e014fa9994bef01efc797fc as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/ns/c25b291d8e014fa9994bef01efc797fc 2024-12-01T17:10:46,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/ns/c25b291d8e014fa9994bef01efc797fc, entries=24, sequenceid=215, filesize=7.7 K 2024-12-01T17:10:46,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/rep_barrier/4c3df96b8edb4b8eb3e292340e678e74 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/rep_barrier/4c3df96b8edb4b8eb3e292340e678e74 2024-12-01T17:10:46,193 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/rep_barrier/4c3df96b8edb4b8eb3e292340e678e74, entries=22, sequenceid=215, filesize=8.0 K 2024-12-01T17:10:46,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/table/2f064a8493304889af9e252d94455ee2 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/table/2f064a8493304889af9e252d94455ee2 2024-12-01T17:10:46,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/table/2f064a8493304889af9e252d94455ee2, entries=39, sequenceid=215, filesize=8.8 K 2024-12-01T17:10:46,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~78.71 KB/80594, heapSize ~124.45 KB/127432, currentSize=0 B/0 for 1588230740 in 502ms, sequenceid=215, compaction requested=false 2024-12-01T17:10:46,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-01T17:10:46,232 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:10:46,861 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-01T17:10:48,062 INFO [regionserver/b87883a7882d:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. because 5b10cafeb6a61c25ce1e97f8b1c04b89/l has an old edit so flush to free WALs after random delay 99643 ms Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733073022971/archive/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf/3498e98cef714e75b8e1c87b00c8ee0a and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/local-export-1733073022971/archive/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf/3498e98cef714e75b8e1c87b00c8ee0a. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-01T17:10:48,460 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000006/launch_container.sh] 2024-12-01T17:10:48,460 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000006/container_tokens] 2024-12-01T17:10:48,460 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000006/sysfs] 2024-12-01T17:10:48,515 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000007/launch_container.sh] 2024-12-01T17:10:48,516 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000007/container_tokens] 2024-12-01T17:10:48,516 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000007/sysfs] 2024-12-01T17:10:49,606 INFO [Socket Reader #1 for port 0 {}] 
ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:43720 2024-12-01T17:10:49,612 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:46930 2024-12-01T17:10:55,663 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 43379 2024-12-01T17:10:56,672 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:43098 2024-12-01T17:10:56,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742321_1497 (size=30189) 2024-12-01T17:10:56,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742321_1497 (size=30189) 2024-12-01T17:10:56,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742321_1497 (size=30189) 2024-12-01T17:10:56,760 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733072744857_0009_01_000009 is : 143 2024-12-01T17:10:56,807 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000008/launch_container.sh] 2024-12-01T17:10:56,807 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000008/container_tokens] 2024-12-01T17:10:56,807 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_3/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000008/sysfs] 2024-12-01T17:10:56,818 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_2/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000009/launch_container.sh] 2024-12-01T17:10:56,818 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_2/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000009/container_tokens] 2024-12-01T17:10:56,818 WARN [ContainersLauncher #2 
{}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_2/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000009/sysfs] 2024-12-01T17:10:56,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742326_1502 (size=460) 2024-12-01T17:10:56,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742326_1502 (size=460) 2024-12-01T17:10:56,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742326_1502 (size=460) 2024-12-01T17:10:56,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742327_1503 (size=30189) 2024-12-01T17:10:56,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742327_1503 (size=30189) 2024-12-01T17:10:56,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742327_1503 (size=30189) 2024-12-01T17:10:57,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742328_1504 (size=349827) 2024-12-01T17:10:57,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742328_1504 (size=349827) 2024-12-01T17:10:57,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742328_1504 (size=349827) 2024-12-01T17:10:57,051 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:44026 2024-12-01T17:10:58,559 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733072744857_0009_m_000001 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-01T17:10:58,560 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073058560 2024-12-01T17:10:58,561 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44451, tgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073058560, rawTgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073058560, srcFsUri=hdfs://localhost:44451, srcDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:10:58,603 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44451, inputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:10:58,603 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073058560, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073058560/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-01T17:10:58,626 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-01T17:10:58,696 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073058560/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-01T17:10:58,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742329_1505 (size=621) 2024-12-01T17:10:58,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742329_1505 (size=621) 2024-12-01T17:10:58,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742329_1505 (size=621) 2024-12-01T17:10:58,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742330_1506 (size=156) 2024-12-01T17:10:58,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742330_1506 (size=156) 2024-12-01T17:10:58,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742330_1506 (size=156) 2024-12-01T17:10:58,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:58,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:10:58,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:00,501 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-4564402281135021257.jar 2024-12-01T17:11:00,502 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:00,502 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:00,580 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-15730674588375689515.jar 2024-12-01T17:11:00,580 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:00,581 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:00,581 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:00,582 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:00,582 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:00,582 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:00,583 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-01T17:11:00,583 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-01T17:11:00,583 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-01T17:11:00,584 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-01T17:11:00,584 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-01T17:11:00,584 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-01T17:11:00,584 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-01T17:11:00,585 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-01T17:11:00,585 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-01T17:11:00,585 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-01T17:11:00,585 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-01T17:11:00,586 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:11:00,586 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:11:00,586 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:11:00,586 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:11:00,587 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:11:00,587 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:11:00,587 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:11:00,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742331_1507 (size=24020) 2024-12-01T17:11:00,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742331_1507 (size=24020) 2024-12-01T17:11:00,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742331_1507 (size=24020) 2024-12-01T17:11:00,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742332_1508 (size=6424742) 2024-12-01T17:11:00,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742332_1508 (size=6424742) 2024-12-01T17:11:00,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742332_1508 (size=6424742) 2024-12-01T17:11:00,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742333_1509 (size=77755) 2024-12-01T17:11:00,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742333_1509 (size=77755) 2024-12-01T17:11:00,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 
is added to blk_1073742333_1509 (size=77755) 2024-12-01T17:11:00,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742334_1510 (size=131360) 2024-12-01T17:11:00,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742334_1510 (size=131360) 2024-12-01T17:11:00,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742334_1510 (size=131360) 2024-12-01T17:11:00,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742335_1511 (size=111793) 2024-12-01T17:11:00,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742335_1511 (size=111793) 2024-12-01T17:11:00,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742335_1511 (size=111793) 2024-12-01T17:11:00,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742336_1512 (size=1832290) 2024-12-01T17:11:00,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742336_1512 (size=1832290) 2024-12-01T17:11:00,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742336_1512 (size=1832290) 2024-12-01T17:11:01,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742337_1513 (size=8360005) 2024-12-01T17:11:01,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742337_1513 (size=8360005) 2024-12-01T17:11:01,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742337_1513 (size=8360005) 2024-12-01T17:11:01,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742338_1514 (size=503880) 2024-12-01T17:11:01,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742338_1514 (size=503880) 2024-12-01T17:11:01,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742338_1514 (size=503880) 2024-12-01T17:11:01,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742339_1515 (size=322274) 2024-12-01T17:11:01,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742339_1515 (size=322274) 2024-12-01T17:11:01,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742339_1515 (size=322274) 2024-12-01T17:11:01,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742340_1516 (size=20406) 2024-12-01T17:11:01,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36379 is added to blk_1073742340_1516 (size=20406) 2024-12-01T17:11:01,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742340_1516 (size=20406) 2024-12-01T17:11:01,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742341_1517 (size=45609) 2024-12-01T17:11:01,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742341_1517 (size=45609) 2024-12-01T17:11:01,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742341_1517 (size=45609) 2024-12-01T17:11:01,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742342_1518 (size=136454) 2024-12-01T17:11:01,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742342_1518 (size=136454) 2024-12-01T17:11:01,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742342_1518 (size=136454) 2024-12-01T17:11:01,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742343_1519 (size=1597136) 2024-12-01T17:11:01,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742343_1519 (size=1597136) 2024-12-01T17:11:01,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742343_1519 (size=1597136) 2024-12-01T17:11:01,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742344_1520 (size=30873) 2024-12-01T17:11:01,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742344_1520 (size=30873) 2024-12-01T17:11:01,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742344_1520 (size=30873) 2024-12-01T17:11:01,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742345_1521 (size=29229) 2024-12-01T17:11:01,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742345_1521 (size=29229) 2024-12-01T17:11:01,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742345_1521 (size=29229) 2024-12-01T17:11:01,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742346_1522 (size=903864) 2024-12-01T17:11:01,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742346_1522 (size=903864) 2024-12-01T17:11:01,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742346_1522 (size=903864) 2024-12-01T17:11:01,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36291 is added to blk_1073742347_1523 (size=5175431) 2024-12-01T17:11:01,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742347_1523 (size=5175431) 2024-12-01T17:11:01,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742347_1523 (size=5175431) 2024-12-01T17:11:01,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742348_1524 (size=232881) 2024-12-01T17:11:01,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742348_1524 (size=232881) 2024-12-01T17:11:01,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742348_1524 (size=232881) 2024-12-01T17:11:01,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742349_1525 (size=1323991) 2024-12-01T17:11:01,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742349_1525 (size=1323991) 2024-12-01T17:11:01,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742349_1525 (size=1323991) 2024-12-01T17:11:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742350_1526 (size=4695811) 2024-12-01T17:11:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742350_1526 (size=4695811) 2024-12-01T17:11:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742350_1526 (size=4695811) 2024-12-01T17:11:01,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742351_1527 (size=1877034) 2024-12-01T17:11:01,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742351_1527 (size=1877034) 2024-12-01T17:11:01,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742351_1527 (size=1877034) 2024-12-01T17:11:01,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742352_1528 (size=440956) 2024-12-01T17:11:01,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742352_1528 (size=440956) 2024-12-01T17:11:01,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742352_1528 (size=440956) 2024-12-01T17:11:01,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742353_1529 (size=217555) 2024-12-01T17:11:01,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742353_1529 (size=217555) 2024-12-01T17:11:01,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742353_1529 (size=217555) 2024-12-01T17:11:01,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742354_1530 (size=4188619) 2024-12-01T17:11:01,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742354_1530 (size=4188619) 2024-12-01T17:11:01,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742354_1530 (size=4188619) 2024-12-01T17:11:01,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742355_1531 (size=127628) 2024-12-01T17:11:01,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742355_1531 (size=127628) 2024-12-01T17:11:01,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742355_1531 (size=127628) 2024-12-01T17:11:01,498 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-01T17:11:01,500 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-01T17:11:01,502 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-01T17:11:01,502 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-01T17:11:01,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742356_1532 (size=441) 2024-12-01T17:11:01,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742356_1532 (size=441) 2024-12-01T17:11:01,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742356_1532 (size=441) 2024-12-01T17:11:01,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742357_1533 (size=21) 2024-12-01T17:11:01,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742357_1533 (size=21) 2024-12-01T17:11:01,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742357_1533 (size=21) 2024-12-01T17:11:01,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742358_1534 (size=304079) 2024-12-01T17:11:01,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742358_1534 (size=304079) 2024-12-01T17:11:01,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742358_1534 (size=304079) 2024-12-01T17:11:03,148 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-01T17:11:03,148 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-01T17:11:03,153 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0009_000001 (auth:SIMPLE) from 127.0.0.1:44172 2024-12-01T17:11:03,174 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000001/launch_container.sh] 2024-12-01T17:11:03,174 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000001/container_tokens] 2024-12-01T17:11:03,174 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0009/container_1733072744857_0009_01_000001/sysfs] 2024-12-01T17:11:03,782 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0010_000001 (auth:SIMPLE) from 127.0.0.1:60022 2024-12-01T17:11:05,860 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
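The preceding entries show the export job for 'snaptb0-testExportWithChecksum' being prepared: the snapshot's hfile list is loaded, two splits of 8.1 K and 5.2 K are computed, and the MiniMRCluster launches the YARN containers that run the copy. Outside this test harness the same export is normally driven through the ExportSnapshot tool; the following is a minimal, hypothetical sketch of such an invocation via ToolRunner. The option names, destination URI, and mapper count are illustrative assumptions, not values taken from this run, so check the tool's usage output for your HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Option names and the target path below are assumptions for illustration only.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportWithChecksum",
        "--copy-to", "hdfs://localhost:44451/user/jenkins/export-test",
        "--mappers", "2"   // the run above computed two splits
    });
    System.exit(rc);
  }
}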
2024-12-01T17:11:06,981 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a163adb2946bdef62e62c0b7fbe9cf4f, had cached 0 bytes from a total of 8256 2024-12-01T17:11:13,930 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0010_000001 (auth:SIMPLE) from 127.0.0.1:54494 2024-12-01T17:11:14,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742359_1535 (size=349777) 2024-12-01T17:11:14,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742359_1535 (size=349777) 2024-12-01T17:11:14,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742359_1535 (size=349777) 2024-12-01T17:11:16,143 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0010_000001 (auth:SIMPLE) from 127.0.0.1:57374 2024-12-01T17:11:16,143 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0010_000001 (auth:SIMPLE) from 127.0.0.1:55488 2024-12-01T17:11:19,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742360_1536 (size=5354) 2024-12-01T17:11:19,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742360_1536 (size=5354) 2024-12-01T17:11:19,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742360_1536 (size=5354) 2024-12-01T17:11:19,528 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_0/usercache/jenkins/appcache/application_1733072744857_0010/container_1733072744857_0010_01_000003/launch_container.sh] 2024-12-01T17:11:19,528 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_0/usercache/jenkins/appcache/application_1733072744857_0010/container_1733072744857_0010_01_000003/container_tokens] 2024-12-01T17:11:19,529 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_0/usercache/jenkins/appcache/application_1733072744857_0010/container_1733072744857_0010_01_000003/sysfs] 2024-12-01T17:11:19,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742362_1538 (size=8256) 2024-12-01T17:11:19,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742362_1538 (size=8256) 2024-12-01T17:11:19,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36379 is added to blk_1073742362_1538 (size=8256) 2024-12-01T17:11:20,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742361_1537 (size=22150) 2024-12-01T17:11:20,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742361_1537 (size=22150) 2024-12-01T17:11:20,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742361_1537 (size=22150) 2024-12-01T17:11:20,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742363_1539 (size=462) 2024-12-01T17:11:20,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742363_1539 (size=462) 2024-12-01T17:11:20,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742363_1539 (size=462) 2024-12-01T17:11:20,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742364_1540 (size=22150) 2024-12-01T17:11:20,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742364_1540 (size=22150) 2024-12-01T17:11:20,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742364_1540 (size=22150) 2024-12-01T17:11:20,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742365_1541 (size=349777) 2024-12-01T17:11:20,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742365_1541 (size=349777) 2024-12-01T17:11:20,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742365_1541 (size=349777) 2024-12-01T17:11:20,123 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0010_000001 (auth:SIMPLE) from 127.0.0.1:57390 2024-12-01T17:11:20,135 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733072744857_0010_01_000002 is : 143 2024-12-01T17:11:20,145 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0010/container_1733072744857_0010_01_000002/launch_container.sh] 2024-12-01T17:11:20,145 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0010/container_1733072744857_0010_01_000002/container_tokens] 2024-12-01T17:11:20,145 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0010/container_1733072744857_0010_01_000002/sysfs] 2024-12-01T17:11:22,161 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-01T17:11:22,162 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-01T17:11:22,167 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-01T17:11:22,167 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-01T17:11:22,168 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-01T17:11:22,168 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-01T17:11:22,168 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-01T17:11:22,168 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-01T17:11:22,168 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073058560/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073058560/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-01T17:11:22,168 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073058560/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-01T17:11:22,168 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073058560/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-01T17:11:22,174 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportWithChecksum 2024-12-01T17:11:22,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-01T17:11:22,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-01T17:11:22,178 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): 
Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073082177"}]},"ts":"1733073082177"} 2024-12-01T17:11:22,180 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-01T17:11:22,180 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-01T17:11:22,180 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-01T17:11:22,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, UNASSIGN}, {pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a163adb2946bdef62e62c0b7fbe9cf4f, UNASSIGN}] 2024-12-01T17:11:22,183 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a163adb2946bdef62e62c0b7fbe9cf4f, UNASSIGN 2024-12-01T17:11:22,183 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, UNASSIGN 2024-12-01T17:11:22,184 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=963f442d9f0bffbf9506e762bd786bfe, regionState=CLOSING, regionLocation=b87883a7882d,41375,1733072737967 2024-12-01T17:11:22,184 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=a163adb2946bdef62e62c0b7fbe9cf4f, regionState=CLOSING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:11:22,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, UNASSIGN because future has completed 2024-12-01T17:11:22,186 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:11:22,186 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=231, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure 963f442d9f0bffbf9506e762bd786bfe, server=b87883a7882d,41375,1733072737967}] 2024-12-01T17:11:22,186 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a163adb2946bdef62e62c0b7fbe9cf4f, UNASSIGN because future has completed 2024-12-01T17:11:22,187 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: 
false 2024-12-01T17:11:22,187 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:11:22,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-01T17:11:22,339 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(122): Close 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:11:22,339 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:11:22,339 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1722): Closing 963f442d9f0bffbf9506e762bd786bfe, disabling compactions & flushes 2024-12-01T17:11:22,339 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:11:22,339 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:11:22,339 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. after waiting 0 ms 2024-12-01T17:11:22,339 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 2024-12-01T17:11:22,340 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(122): Close a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:11:22,340 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:11:22,340 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1722): Closing a163adb2946bdef62e62c0b7fbe9cf4f, disabling compactions & flushes 2024-12-01T17:11:22,340 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 2024-12-01T17:11:22,340 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 2024-12-01T17:11:22,340 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 
after waiting 0 ms 2024-12-01T17:11:22,340 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 2024-12-01T17:11:22,345 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:11:22,345 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=9 2024-12-01T17:11:22,345 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:11:22,345 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f. 2024-12-01T17:11:22,345 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:11:22,346 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1676): Region close journal for a163adb2946bdef62e62c0b7fbe9cf4f: Waiting for close lock at 1733073082340Running coprocessor pre-close hooks at 1733073082340Disabling compacts and flushes for region at 1733073082340Disabling writes for close at 1733073082340Writing region close event to WAL at 1733073082341 (+1 ms)Running coprocessor post-close hooks at 1733073082345 (+4 ms)Closed at 1733073082345 2024-12-01T17:11:22,346 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe. 
2024-12-01T17:11:22,346 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1676): Region close journal for 963f442d9f0bffbf9506e762bd786bfe: Waiting for close lock at 1733073082339Running coprocessor pre-close hooks at 1733073082339Disabling compacts and flushes for region at 1733073082339Disabling writes for close at 1733073082339Writing region close event to WAL at 1733073082340 (+1 ms)Running coprocessor post-close hooks at 1733073082345 (+5 ms)Closed at 1733073082346 (+1 ms) 2024-12-01T17:11:22,348 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(157): Closed 963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:11:22,349 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(157): Closed a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:11:22,350 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=963f442d9f0bffbf9506e762bd786bfe, regionState=CLOSED 2024-12-01T17:11:22,351 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=a163adb2946bdef62e62c0b7fbe9cf4f, regionState=CLOSED 2024-12-01T17:11:22,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=231, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure 963f442d9f0bffbf9506e762bd786bfe, server=b87883a7882d,41375,1733072737967 because future has completed 2024-12-01T17:11:22,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:11:22,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=229 2024-12-01T17:11:22,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=229, state=SUCCESS, hasLock=false; CloseRegionProcedure 963f442d9f0bffbf9506e762bd786bfe, server=b87883a7882d,41375,1733072737967 in 170 msec 2024-12-01T17:11:22,358 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=230 2024-12-01T17:11:22,359 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=230, state=SUCCESS, hasLock=false; CloseRegionProcedure a163adb2946bdef62e62c0b7fbe9cf4f, server=b87883a7882d,44753,1733072737741 in 170 msec 2024-12-01T17:11:22,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=963f442d9f0bffbf9506e762bd786bfe, UNASSIGN in 176 msec 2024-12-01T17:11:22,360 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=230, resume processing ppid=228 2024-12-01T17:11:22,360 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=a163adb2946bdef62e62c0b7fbe9cf4f, UNASSIGN in 177 msec 2024-12-01T17:11:22,362 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-12-01T17:11:22,362 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 181 msec 2024-12-01T17:11:22,363 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073082363"}]},"ts":"1733073082363"} 2024-12-01T17:11:22,366 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-01T17:11:22,366 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-01T17:11:22,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 193 msec 2024-12-01T17:11:22,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-01T17:11:22,490 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-01T17:11:22,491 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportWithChecksum 2024-12-01T17:11:22,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-01T17:11:22,493 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-01T17:11:22,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-01T17:11:22,494 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=233, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-01T17:11:22,496 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-01T17:11:22,510 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:11:22,510 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:11:22,512 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/recovered.edits] 
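At 17:11:22 the test tears the table down: DisableTableProcedure pid=227 unassigns both regions and marks the table DISABLED, DeleteTableProcedure pid=233 follows, and HFileArchiver moves the region directories into the archive tree. From the client side that whole sequence is two Admin calls; a minimal sketch, assuming a reachable cluster and the standard client configuration on the classpath:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);  // master runs the DisableTableProcedure seen above
      }
      admin.deleteTable(table);     // master runs DeleteTableProcedure and archives the HFiles
    }
  }
}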
2024-12-01T17:11:22,513 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/recovered.edits] 2024-12-01T17:11:22,518 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf/3498e98cef714e75b8e1c87b00c8ee0a to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/cf/3498e98cef714e75b8e1c87b00c8ee0a 2024-12-01T17:11:22,521 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f/recovered.edits/9.seqid 2024-12-01T17:11:22,522 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/cf/db40289d51744829afdf3b40fecdade7 2024-12-01T17:11:22,522 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/a163adb2946bdef62e62c0b7fbe9cf4f 2024-12-01T17:11:22,528 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/recovered.edits/12.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe/recovered.edits/12.seqid 2024-12-01T17:11:22,529 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportWithChecksum/963f442d9f0bffbf9506e762bd786bfe 2024-12-01T17:11:22,529 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-01T17:11:22,531 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=233, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-01T17:11:22,544 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-01T17:11:22,547 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): 
Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-01T17:11:22,549 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=233, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-01T17:11:22,549 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-01T17:11:22,550 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733073082549"}]},"ts":"9223372036854775807"} 2024-12-01T17:11:22,550 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733073082549"}]},"ts":"9223372036854775807"} 2024-12-01T17:11:22,553 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-01T17:11:22,553 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 963f442d9f0bffbf9506e762bd786bfe, NAME => 'testtb-testExportWithChecksum,,1733073021630.963f442d9f0bffbf9506e762bd786bfe.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a163adb2946bdef62e62c0b7fbe9cf4f, NAME => 'testtb-testExportWithChecksum,1,1733073021630.a163adb2946bdef62e62c0b7fbe9cf4f.', STARTKEY => '1', ENDKEY => ''}] 2024-12-01T17:11:22,553 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 2024-12-01T17:11:22,554 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733073082553"}]},"ts":"9223372036854775807"} 2024-12-01T17:11:22,557 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-01T17:11:22,558 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=233, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-01T17:11:22,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 67 msec 2024-12-01T17:11:22,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-01T17:11:22,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-01T17:11:22,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-01T17:11:22,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-01T17:11:22,595 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-01T17:11:22,595 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-01T17:11:22,595 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-01T17:11:22,596 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-01T17:11:22,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-01T17:11:22,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-01T17:11:22,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-01T17:11:22,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:11:22,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:11:22,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-01T17:11:22,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:11:22,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:11:22,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=233 2024-12-01T17:11:22,607 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:22,607 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for 
testtb-testExportWithChecksum 2024-12-01T17:11:22,608 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-01T17:11:22,608 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:22,608 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:22,608 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:22,615 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-01T17:11:22,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-01T17:11:22,619 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-12-01T17:11:22,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-01T17:11:22,650 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=816 (was 819), OpenFileDescriptor=823 (was 827), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=718 (was 507) - SystemLoadAverage LEAK? -, ProcessCount=25 (was 17) - ProcessCount LEAK? 
-, AvailableMemoryMB=2568 (was 3320) 2024-12-01T17:11:22,650 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=816 is superior to 500 2024-12-01T17:11:22,673 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=816, OpenFileDescriptor=823, MaxFileDescriptor=1048576, SystemLoadAverage=718, ProcessCount=25, AvailableMemoryMB=2570 2024-12-01T17:11:22,673 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=816 is superior to 500 2024-12-01T17:11:22,674 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T17:11:22,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:22,677 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T17:11:22,677 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 234 2024-12-01T17:11:22,677 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:11:22,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-01T17:11:22,678 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T17:11:22,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742366_1542 (size=418) 2024-12-01T17:11:22,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742366_1542 (size=418) 2024-12-01T17:11:22,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742366_1542 (size=418) 2024-12-01T17:11:22,686 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f3fe92582664bdd6f8db08468f9fd6ce, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' 
=> 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:11:22,689 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 051bf283f1b7f14edbd2eaee072c1718, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:11:22,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742367_1543 (size=79) 2024-12-01T17:11:22,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742367_1543 (size=79) 2024-12-01T17:11:22,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742367_1543 (size=79) 2024-12-01T17:11:22,703 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:11:22,703 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing f3fe92582664bdd6f8db08468f9fd6ce, disabling compactions & flushes 2024-12-01T17:11:22,703 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 2024-12-01T17:11:22,703 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 2024-12-01T17:11:22,703 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. after waiting 0 ms 2024-12-01T17:11:22,703 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 
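The 17:11:22,615 and 17:11:22,619 entries above show the master deleting the two snapshots left over from the checksum test, emptySnaptb0-testExportWithChecksum and snaptb0-testExportWithChecksum. A client issues those requests through the Admin API as well; a small hedged sketch (the snapshot names come from the log, everything else is assumed):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Each call corresponds to one of the delete name: ... type: DISABLED requests in the log.
      admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
      admin.deleteSnapshot("snaptb0-testExportWithChecksum");
    }
  }
}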
2024-12-01T17:11:22,703 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 2024-12-01T17:11:22,703 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for f3fe92582664bdd6f8db08468f9fd6ce: Waiting for close lock at 1733073082703Disabling compacts and flushes for region at 1733073082703Disabling writes for close at 1733073082703Writing region close event to WAL at 1733073082703Closed at 1733073082703 2024-12-01T17:11:22,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742368_1544 (size=79) 2024-12-01T17:11:22,705 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:11:22,705 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 051bf283f1b7f14edbd2eaee072c1718, disabling compactions & flushes 2024-12-01T17:11:22,705 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 2024-12-01T17:11:22,705 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 2024-12-01T17:11:22,705 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. after waiting 0 ms 2024-12-01T17:11:22,705 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 2024-12-01T17:11:22,705 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 
2024-12-01T17:11:22,705 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 051bf283f1b7f14edbd2eaee072c1718: Waiting for close lock at 1733073082705Disabling compacts and flushes for region at 1733073082705Disabling writes for close at 1733073082705Writing region close event to WAL at 1733073082705Closed at 1733073082705 2024-12-01T17:11:22,706 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T17:11:22,707 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733073082706"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733073082706"}]},"ts":"1733073082706"} 2024-12-01T17:11:22,707 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733073082706"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733073082706"}]},"ts":"1733073082706"} 2024-12-01T17:11:22,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742368_1544 (size=79) 2024-12-01T17:11:22,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742368_1544 (size=79) 2024-12-01T17:11:22,709 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
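The create request at 17:11:22,674 defines 'testtb-testExportFileSystemStateWithSkipTmp' with REGION_REPLICATION => '1' and a single 'cf' family (VERSIONS => '1', the remaining attributes at their defaults), and CreateTableProcedure pid=234 has just written the region layout and added the two regions, split at row key '1', to hbase:meta. A client-side sketch of an equivalent createTable call using the descriptor builders; only the settings printed in the logged descriptor are set explicitly, the rest is left at defaults:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSkipTmpTableSketch {
  public static void main(String[] args) throws Exception {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(1)               // VERSIONS => '1' in the logged descriptor
        .build();
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
        .setRegionReplication(1)         // REGION_REPLICATION => '1'
        .setColumnFamily(cf)
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Two regions with boundaries ''..'1' and '1'..'' as shown in the log.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}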
2024-12-01T17:11:22,709 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T17:11:22,710 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073082710"}]},"ts":"1733073082710"} 2024-12-01T17:11:22,711 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-01T17:11:22,711 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b87883a7882d=0} racks are {/default-rack=0} 2024-12-01T17:11:22,713 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T17:11:22,713 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T17:11:22,713 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T17:11:22,713 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T17:11:22,713 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T17:11:22,713 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T17:11:22,713 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T17:11:22,713 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T17:11:22,713 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T17:11:22,713 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T17:11:22,713 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f3fe92582664bdd6f8db08468f9fd6ce, ASSIGN}, {pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=051bf283f1b7f14edbd2eaee072c1718, ASSIGN}] 2024-12-01T17:11:22,714 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=051bf283f1b7f14edbd2eaee072c1718, ASSIGN 2024-12-01T17:11:22,714 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f3fe92582664bdd6f8db08468f9fd6ce, ASSIGN 2024-12-01T17:11:22,715 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=051bf283f1b7f14edbd2eaee072c1718, ASSIGN; state=OFFLINE, location=b87883a7882d,44753,1733072737741; forceNewPlan=false, retain=false 2024-12-01T17:11:22,715 INFO 
[PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f3fe92582664bdd6f8db08468f9fd6ce, ASSIGN; state=OFFLINE, location=b87883a7882d,36521,1733072737896; forceNewPlan=false, retain=false 2024-12-01T17:11:22,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-01T17:11:22,865 INFO [b87883a7882d:38737 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-01T17:11:22,865 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=051bf283f1b7f14edbd2eaee072c1718, regionState=OPENING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:11:22,865 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=f3fe92582664bdd6f8db08468f9fd6ce, regionState=OPENING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:11:22,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=051bf283f1b7f14edbd2eaee072c1718, ASSIGN because future has completed 2024-12-01T17:11:22,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=237, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure 051bf283f1b7f14edbd2eaee072c1718, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:11:22,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f3fe92582664bdd6f8db08468f9fd6ce, ASSIGN because future has completed 2024-12-01T17:11:22,868 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:11:22,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-01T17:11:23,022 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 2024-12-01T17:11:23,022 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7752): Opening region: {ENCODED => 051bf283f1b7f14edbd2eaee072c1718, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T17:11:23,022 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 
service=AccessControlService 2024-12-01T17:11:23,022 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-01T17:11:23,023 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,023 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:11:23,023 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7794): checking encryption for 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,023 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7797): checking classloading for 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,023 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 2024-12-01T17:11:23,023 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7752): Opening region: {ENCODED => f3fe92582664bdd6f8db08468f9fd6ce, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T17:11:23,023 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. service=AccessControlService 2024-12-01T17:11:23,024 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-01T17:11:23,024 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,024 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T17:11:23,024 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7794): checking encryption for f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,024 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7797): checking classloading for f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,024 INFO [StoreOpener-051bf283f1b7f14edbd2eaee072c1718-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,025 INFO [StoreOpener-f3fe92582664bdd6f8db08468f9fd6ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,025 INFO [StoreOpener-051bf283f1b7f14edbd2eaee072c1718-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 051bf283f1b7f14edbd2eaee072c1718 columnFamilyName cf 2024-12-01T17:11:23,026 DEBUG [StoreOpener-051bf283f1b7f14edbd2eaee072c1718-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:11:23,026 INFO [StoreOpener-051bf283f1b7f14edbd2eaee072c1718-1 {}] regionserver.HStore(327): Store=051bf283f1b7f14edbd2eaee072c1718/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:11:23,026 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1038): replaying wal for 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,026 INFO [StoreOpener-f3fe92582664bdd6f8db08468f9fd6ce-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f3fe92582664bdd6f8db08468f9fd6ce columnFamilyName cf 2024-12-01T17:11:23,026 DEBUG [StoreOpener-f3fe92582664bdd6f8db08468f9fd6ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T17:11:23,026 INFO [StoreOpener-f3fe92582664bdd6f8db08468f9fd6ce-1 {}] regionserver.HStore(327): Store=f3fe92582664bdd6f8db08468f9fd6ce/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T17:11:23,026 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,026 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1038): replaying wal for f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,027 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,027 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,027 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1048): stopping wal replay for 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,027 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1060): Cleaning up temporary data for 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,027 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,027 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1048): stopping wal replay for f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,027 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1060): Cleaning up temporary data for f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,028 DEBUG 
[RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1093): writing seq id for 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,028 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1093): writing seq id for f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,029 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:11:23,029 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T17:11:23,030 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1114): Opened f3fe92582664bdd6f8db08468f9fd6ce; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71910084, jitterRate=0.07154375314712524}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:11:23,030 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1114): Opened 051bf283f1b7f14edbd2eaee072c1718; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70259931, jitterRate=0.04695455729961395}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T17:11:23,030 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,030 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,030 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1006): Region open journal for 051bf283f1b7f14edbd2eaee072c1718: Running coprocessor pre-open hook at 1733073083023Writing region info on filesystem at 1733073083023Initializing all the Stores at 1733073083024 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733073083024Cleaning up temporary data from old regions at 1733073083027 (+3 ms)Running coprocessor post-open hooks at 1733073083030 (+3 ms)Region opened successfully at 1733073083030 2024-12-01T17:11:23,030 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1006): Region open journal for f3fe92582664bdd6f8db08468f9fd6ce: Running coprocessor pre-open hook at 
1733073083024Writing region info on filesystem at 1733073083024Initializing all the Stores at 1733073083024Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733073083024Cleaning up temporary data from old regions at 1733073083027 (+3 ms)Running coprocessor post-open hooks at 1733073083030 (+3 ms)Region opened successfully at 1733073083030 2024-12-01T17:11:23,031 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce., pid=238, masterSystemTime=1733073083020 2024-12-01T17:11:23,031 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718., pid=237, masterSystemTime=1733073083019 2024-12-01T17:11:23,032 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 2024-12-01T17:11:23,032 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 2024-12-01T17:11:23,033 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=051bf283f1b7f14edbd2eaee072c1718, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:11:23,033 DEBUG [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 2024-12-01T17:11:23,033 INFO [RS_OPEN_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 
2024-12-01T17:11:23,033 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=f3fe92582664bdd6f8db08468f9fd6ce, regionState=OPEN, openSeqNum=2, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:11:23,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=237, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure 051bf283f1b7f14edbd2eaee072c1718, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:11:23,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:11:23,036 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=237, resume processing ppid=236 2024-12-01T17:11:23,036 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, ppid=236, state=SUCCESS, hasLock=false; OpenRegionProcedure 051bf283f1b7f14edbd2eaee072c1718, server=b87883a7882d,44753,1733072737741 in 167 msec 2024-12-01T17:11:23,037 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=235 2024-12-01T17:11:23,037 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=235, state=SUCCESS, hasLock=false; OpenRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce, server=b87883a7882d,36521,1733072737896 in 167 msec 2024-12-01T17:11:23,037 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=051bf283f1b7f14edbd2eaee072c1718, ASSIGN in 323 msec 2024-12-01T17:11:23,038 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=234 2024-12-01T17:11:23,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f3fe92582664bdd6f8db08468f9fd6ce, ASSIGN in 324 msec 2024-12-01T17:11:23,039 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T17:11:23,039 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073083039"}]},"ts":"1733073083039"} 2024-12-01T17:11:23,040 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-01T17:11:23,041 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T17:11:23,041 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-01T17:11:23,044 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] 
access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-01T17:11:23,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:11:23,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:11:23,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:11:23,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:11:23,153 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:23,153 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:23,154 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:23,154 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:23,157 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:23,157 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:23,158 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:23,158 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 
2024-12-01T17:11:23,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 480 msec 2024-12-01T17:11:23,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-01T17:11:23,310 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-01T17:11:23,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-01T17:11:23,310 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:11:23,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-01T17:11:23,314 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:11:23,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-01T17:11:23,315 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-01T17:11:23,318 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-01T17:11:23,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733073083318 (current time:1733073083318). 
2024-12-01T17:11:23,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:11:23,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-01T17:11:23,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:11:23,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19264fc2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:11:23,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:11:23,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:11:23,321 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:11:23,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:11:23,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:11:23,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69c62745, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:11:23,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:11:23,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:11:23,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:11:23,323 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59332, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:11:23,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@391f37f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:11:23,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:11:23,324 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:11:23,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:11:23,325 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51700, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:11:23,333 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:11:23,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:11:23,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:11:23,333 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:11:23,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:11:23,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@80866a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:11:23,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:11:23,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:11:23,335 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:11:23,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:11:23,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:11:23,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65d4731c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:11:23,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:11:23,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:11:23,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:11:23,336 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59346, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:11:23,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30725a8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:11:23,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:11:23,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:11:23,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:11:23,339 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51710, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:11:23,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:11:23,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:11:23,342 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57106, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:11:23,343 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737. 2024-12-01T17:11:23,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:11:23,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:11:23,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:11:23,343 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:11:23,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-01T17:11:23,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:11:23,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-01T17:11:23,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-01T17:11:23,346 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:11:23,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-01T17:11:23,347 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:11:23,348 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:11:23,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742369_1545 (size=203) 2024-12-01T17:11:23,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742369_1545 (size=203) 2024-12-01T17:11:23,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742369_1545 (size=203) 2024-12-01T17:11:23,355 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=239, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:11:23,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce}, {pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 051bf283f1b7f14edbd2eaee072c1718}] 2024-12-01T17:11:23,356 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,357 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-01T17:11:23,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=241 2024-12-01T17:11:23,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 2024-12-01T17:11:23,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2603): Flush status journal for 051bf283f1b7f14edbd2eaee072c1718: 2024-12-01T17:11:23,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-01T17:11:23,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=240 2024-12-01T17:11:23,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:23,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:11:23,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 
2024-12-01T17:11:23,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:11:23,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2603): Flush status journal for f3fe92582664bdd6f8db08468f9fd6ce: 2024-12-01T17:11:23,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-01T17:11:23,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:23,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:11:23,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-01T17:11:23,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742371_1547 (size=82) 2024-12-01T17:11:23,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742370_1546 (size=82) 2024-12-01T17:11:23,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742371_1547 (size=82) 2024-12-01T17:11:23,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742370_1546 (size=82) 2024-12-01T17:11:23,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742371_1547 (size=82) 2024-12-01T17:11:23,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 2024-12-01T17:11:23,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742370_1546 (size=82) 2024-12-01T17:11:23,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 
2024-12-01T17:11:23,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=240 2024-12-01T17:11:23,523 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=241 2024-12-01T17:11:23,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=240 2024-12-01T17:11:23,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=241 2024-12-01T17:11:23,523 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,523 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,523 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,523 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 051bf283f1b7f14edbd2eaee072c1718 in 169 msec 2024-12-01T17:11:23,526 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=239 2024-12-01T17:11:23,526 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce in 169 msec 2024-12-01T17:11:23,526 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:11:23,527 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:11:23,528 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:11:23,528 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:23,528 
DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:23,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742372_1548 (size=585) 2024-12-01T17:11:23,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742372_1548 (size=585) 2024-12-01T17:11:23,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742372_1548 (size=585) 2024-12-01T17:11:23,541 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:11:23,546 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:11:23,547 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:23,548 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:11:23,548 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-01T17:11:23,549 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 204 msec 2024-12-01T17:11:23,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-01T17:11:23,660 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-01T17:11:23,665 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', 
row='0d4b41beeb159a0602821d88d24e0d78e', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:11:23,666 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='10a42ac052d8d6a3408092e97511c26da', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:11:23,669 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='325dd73576e3f06da68fa88ea73a569d6', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:11:23,670 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='2d92249417c4a51f6abdab40370938e3a', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:11:23,670 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='4b9d11d1d05c0da8c1013f202f01a820c', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:11:23,671 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='55515e61b247fb0c6fe89ce1341cac6a4', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:11:23,672 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36521 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. with WAL disabled. Data may be lost in the event of a crash. 2024-12-01T17:11:23,672 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='637ee69e59382418226375e9612ee2d37', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718., hostname=b87883a7882d,44753,1733072737741, seqNum=2] 2024-12-01T17:11:23,674 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44753 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. with WAL disabled. Data may be lost in the event of a crash. 
2024-12-01T17:11:23,676 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-01T17:11:23,679 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:23,679 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 2024-12-01T17:11:23,679 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T17:11:23,681 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-01T17:11:23,687 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-01T17:11:23,694 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-01T17:11:23,698 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-01T17:11:23,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733073083698 (current time:1733073083698). 
2024-12-01T17:11:23,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-01T17:11:23,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-01T17:11:23,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-01T17:11:23,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20cf6b6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:11:23,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:11:23,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:11:23,700 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:11:23,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:11:23,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:11:23,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@607669a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:11:23,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:11:23,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:11:23,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:11:23,701 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59358, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:11:23,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ec1551c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:11:23,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:11:23,703 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:11:23,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:11:23,704 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51722, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:11:23,706 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 2024-12-01T17:11:23,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:11:23,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:11:23,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:11:23,707 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:11:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c9d86cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:11:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ClusterIdFetcher(90): Going to request b87883a7882d,38737,-1 for getting cluster id 2024-12-01T17:11:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T17:11:23,708 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '75930971-73b3-4a63-a3bb-6db1d7d1d483' 2024-12-01T17:11:23,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T17:11:23,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "75930971-73b3-4a63-a3bb-6db1d7d1d483" 2024-12-01T17:11:23,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c9abda2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:11:23,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b87883a7882d,38737,-1] 2024-12-01T17:11:23,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T17:11:23,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:11:23,710 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59376, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T17:11:23,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76757034, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T17:11:23,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T17:11:23,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b87883a7882d,44753,1733072737741, seqNum=-1] 2024-12-01T17:11:23,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:11:23,712 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51734, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-01T17:11:23,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., hostname=b87883a7882d,36521,1733072737896, seqNum=2] 2024-12-01T17:11:23,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T17:11:23,715 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57116, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T17:11:23,716 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737. 2024-12-01T17:11:23,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor278.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T17:11:23,716 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:11:23,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:11:23,716 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T17:11:23,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-01T17:11:23,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-01T17:11:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-01T17:11:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-01T17:11:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-01T17:11:23,719 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-01T17:11:23,720 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-01T17:11:23,722 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-01T17:11:23,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742373_1549 (size=198) 2024-12-01T17:11:23,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742373_1549 (size=198) 2024-12-01T17:11:23,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742373_1549 (size=198) 2024-12-01T17:11:23,730 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-01T17:11:23,730 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce}, {pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 051bf283f1b7f14edbd2eaee072c1718}] 2024-12-01T17:11:23,731 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,731 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-01T17:11:23,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36521 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-12-01T17:11:23,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44753 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-12-01T17:11:23,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 2024-12-01T17:11:23,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 
2024-12-01T17:11:23,883 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2902): Flushing f3fe92582664bdd6f8db08468f9fd6ce 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-01T17:11:23,883 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2902): Flushing 051bf283f1b7f14edbd2eaee072c1718 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-01T17:11:23,896 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/.tmp/cf/2b5574e44ba9464291fa4da2ffe5bb8c is 71, key is 08b925063a8d9de5e8b17c5598110e0c/cf:q/1733073083672/Put/seqid=0 2024-12-01T17:11:23,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/.tmp/cf/07687b8cf8fd41b795a85e3bfce43ff3 is 71, key is 11bb26edb43dd05642b1a99a999740cc/cf:q/1733073083674/Put/seqid=0 2024-12-01T17:11:23,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742374_1550 (size=5216) 2024-12-01T17:11:23,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742374_1550 (size=5216) 2024-12-01T17:11:23,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742374_1550 (size=5216) 2024-12-01T17:11:23,911 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/.tmp/cf/2b5574e44ba9464291fa4da2ffe5bb8c 2024-12-01T17:11:23,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/.tmp/cf/2b5574e44ba9464291fa4da2ffe5bb8c as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/cf/2b5574e44ba9464291fa4da2ffe5bb8c 2024-12-01T17:11:23,922 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/cf/2b5574e44ba9464291fa4da2ffe5bb8c, entries=2, sequenceid=6, filesize=5.1 K 2024-12-01T17:11:23,923 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for f3fe92582664bdd6f8db08468f9fd6ce in 40ms, sequenceid=6, compaction requested=false 2024-12-01T17:11:23,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-01T17:11:23,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for f3fe92582664bdd6f8db08468f9fd6ce: 2024-12-01T17:11:23,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-01T17:11:23,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:23,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:11:23,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/cf/2b5574e44ba9464291fa4da2ffe5bb8c] hfiles 2024-12-01T17:11:23,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/cf/2b5574e44ba9464291fa4da2ffe5bb8c for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:23,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742375_1551 (size=8394) 2024-12-01T17:11:23,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742375_1551 (size=8394) 2024-12-01T17:11:23,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742375_1551 (size=8394) 2024-12-01T17:11:23,935 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/.tmp/cf/07687b8cf8fd41b795a85e3bfce43ff3 2024-12-01T17:11:23,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/.tmp/cf/07687b8cf8fd41b795a85e3bfce43ff3 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/cf/07687b8cf8fd41b795a85e3bfce43ff3 2024-12-01T17:11:23,946 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/cf/07687b8cf8fd41b795a85e3bfce43ff3, entries=48, sequenceid=6, filesize=8.2 K 2024-12-01T17:11:23,947 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 051bf283f1b7f14edbd2eaee072c1718 in 64ms, sequenceid=6, compaction requested=false 2024-12-01T17:11:23,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for 051bf283f1b7f14edbd2eaee072c1718: 2024-12-01T17:11:23,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-01T17:11:23,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:23,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-01T17:11:23,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/cf/07687b8cf8fd41b795a85e3bfce43ff3] hfiles 2024-12-01T17:11:23,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/cf/07687b8cf8fd41b795a85e3bfce43ff3 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:23,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742376_1552 (size=121) 2024-12-01T17:11:23,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742376_1552 (size=121) 2024-12-01T17:11:23,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742376_1552 (size=121) 2024-12-01T17:11:23,954 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 
2024-12-01T17:11:23,954 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-12-01T17:11:23,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-12-01T17:11:23,955 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,955 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:23,957 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce in 226 msec 2024-12-01T17:11:23,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742377_1553 (size=121) 2024-12-01T17:11:23,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742377_1553 (size=121) 2024-12-01T17:11:23,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742377_1553 (size=121) 2024-12-01T17:11:23,971 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 
2024-12-01T17:11:23,971 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b87883a7882d:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-12-01T17:11:23,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-12-01T17:11:23,971 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,971 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:23,974 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=244, resume processing ppid=242 2024-12-01T17:11:23,974 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-01T17:11:23,974 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 051bf283f1b7f14edbd2eaee072c1718 in 242 msec 2024-12-01T17:11:23,975 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-01T17:11:23,976 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-01T17:11:23,976 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:23,976 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:23,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742378_1554 (size=663) 2024-12-01T17:11:23,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742378_1554 (size=663) 2024-12-01T17:11:23,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742378_1554 (size=663) 2024-12-01T17:11:23,993 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-01T17:11:23,999 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-01T17:11:24,000 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:24,001 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-01T17:11:24,001 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-01T17:11:24,003 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 284 msec 2024-12-01T17:11:24,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-01T17:11:24,040 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-01T17:11:24,040 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073084040 2024-12-01T17:11:24,040 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:44451, tgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073084040, rawTgtDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073084040, srcFsUri=hdfs://localhost:44451, srcDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:11:24,072 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:44451, inputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546 2024-12-01T17:11:24,072 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073084040, skipTmp=true, 
initialOutputSnapshotDir=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073084040/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:24,074 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-01T17:11:24,079 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073084040/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:24,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742380_1556 (size=663) 2024-12-01T17:11:24,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742380_1556 (size=663) 2024-12-01T17:11:24,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742380_1556 (size=663) 2024-12-01T17:11:24,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742379_1555 (size=198) 2024-12-01T17:11:24,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742379_1555 (size=198) 2024-12-01T17:11:24,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742379_1555 (size=198) 2024-12-01T17:11:24,100 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:24,100 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:24,101 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:25,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-10007709659832164019.jar 2024-12-01T17:11:25,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:25,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:25,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop-6634306996770216380.jar 2024-12-01T17:11:25,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:25,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:25,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:25,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:25,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:25,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-01T17:11:25,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-01T17:11:25,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-01T17:11:25,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-01T17:11:25,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-01T17:11:25,117 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-01T17:11:25,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-01T17:11:25,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-01T17:11:25,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-01T17:11:25,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-01T17:11:25,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-01T17:11:25,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-01T17:11:25,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:11:25,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:11:25,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:11:25,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:11:25,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-01T17:11:25,119 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:11:25,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-01T17:11:25,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742381_1557 (size=24020) 2024-12-01T17:11:25,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742381_1557 (size=24020) 2024-12-01T17:11:25,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742381_1557 (size=24020) 2024-12-01T17:11:25,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742382_1558 (size=77755) 2024-12-01T17:11:25,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742382_1558 (size=77755) 2024-12-01T17:11:25,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742382_1558 (size=77755) 2024-12-01T17:11:25,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742383_1559 (size=131360) 2024-12-01T17:11:25,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742383_1559 (size=131360) 2024-12-01T17:11:25,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742383_1559 (size=131360) 2024-12-01T17:11:25,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742384_1560 (size=111793) 2024-12-01T17:11:25,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742384_1560 (size=111793) 2024-12-01T17:11:25,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742384_1560 (size=111793) 2024-12-01T17:11:25,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742385_1561 (size=1832290) 2024-12-01T17:11:25,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742385_1561 (size=1832290) 2024-12-01T17:11:25,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742385_1561 (size=1832290) 2024-12-01T17:11:25,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742386_1562 (size=8360005) 2024-12-01T17:11:25,275 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742386_1562 (size=8360005) 2024-12-01T17:11:25,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742386_1562 (size=8360005) 2024-12-01T17:11:25,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742387_1563 (size=440956) 2024-12-01T17:11:25,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742387_1563 (size=440956) 2024-12-01T17:11:25,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742387_1563 (size=440956) 2024-12-01T17:11:25,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742388_1564 (size=503880) 2024-12-01T17:11:25,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742388_1564 (size=503880) 2024-12-01T17:11:25,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742388_1564 (size=503880) 2024-12-01T17:11:25,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742389_1565 (size=322274) 2024-12-01T17:11:25,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742389_1565 (size=322274) 2024-12-01T17:11:25,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742389_1565 (size=322274) 2024-12-01T17:11:25,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742390_1566 (size=20406) 2024-12-01T17:11:25,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742390_1566 (size=20406) 2024-12-01T17:11:25,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742390_1566 (size=20406) 2024-12-01T17:11:25,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742391_1567 (size=45609) 2024-12-01T17:11:25,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742391_1567 (size=45609) 2024-12-01T17:11:25,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742391_1567 (size=45609) 2024-12-01T17:11:25,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742392_1568 (size=136454) 2024-12-01T17:11:25,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742392_1568 (size=136454) 2024-12-01T17:11:25,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742392_1568 (size=136454) 2024-12-01T17:11:25,740 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742393_1569 (size=1597136) 2024-12-01T17:11:25,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742393_1569 (size=1597136) 2024-12-01T17:11:25,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742393_1569 (size=1597136) 2024-12-01T17:11:25,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742394_1570 (size=30873) 2024-12-01T17:11:25,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742394_1570 (size=30873) 2024-12-01T17:11:25,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742394_1570 (size=30873) 2024-12-01T17:11:25,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742395_1571 (size=29229) 2024-12-01T17:11:25,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742395_1571 (size=29229) 2024-12-01T17:11:25,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742395_1571 (size=29229) 2024-12-01T17:11:25,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742396_1572 (size=903864) 2024-12-01T17:11:25,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742396_1572 (size=903864) 2024-12-01T17:11:25,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742396_1572 (size=903864) 2024-12-01T17:11:25,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742397_1573 (size=6424742) 2024-12-01T17:11:25,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742397_1573 (size=6424742) 2024-12-01T17:11:25,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742397_1573 (size=6424742) 2024-12-01T17:11:25,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742398_1574 (size=5175431) 2024-12-01T17:11:25,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742398_1574 (size=5175431) 2024-12-01T17:11:25,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742398_1574 (size=5175431) 2024-12-01T17:11:25,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742399_1575 (size=232881) 2024-12-01T17:11:25,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742399_1575 (size=232881) 2024-12-01T17:11:25,805 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742399_1575 (size=232881) 2024-12-01T17:11:25,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742400_1576 (size=1323991) 2024-12-01T17:11:25,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742400_1576 (size=1323991) 2024-12-01T17:11:25,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742400_1576 (size=1323991) 2024-12-01T17:11:25,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742401_1577 (size=4695811) 2024-12-01T17:11:25,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742401_1577 (size=4695811) 2024-12-01T17:11:25,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742401_1577 (size=4695811) 2024-12-01T17:11:25,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742402_1578 (size=1877034) 2024-12-01T17:11:25,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742402_1578 (size=1877034) 2024-12-01T17:11:25,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742402_1578 (size=1877034) 2024-12-01T17:11:25,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742403_1579 (size=217555) 2024-12-01T17:11:25,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742403_1579 (size=217555) 2024-12-01T17:11:25,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742403_1579 (size=217555) 2024-12-01T17:11:25,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742404_1580 (size=4188619) 2024-12-01T17:11:25,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742404_1580 (size=4188619) 2024-12-01T17:11:25,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742404_1580 (size=4188619) 2024-12-01T17:11:25,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742405_1581 (size=127628) 2024-12-01T17:11:25,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742405_1581 (size=127628) 2024-12-01T17:11:25,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742405_1581 (size=127628) 2024-12-01T17:11:25,862 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-01T17:11:25,864 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-01T17:11:25,865 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.2 K 2024-12-01T17:11:25,865 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.1 K 2024-12-01T17:11:25,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742406_1582 (size=469) 2024-12-01T17:11:25,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742406_1582 (size=469) 2024-12-01T17:11:25,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742406_1582 (size=469) 2024-12-01T17:11:25,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742407_1583 (size=21) 2024-12-01T17:11:25,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742407_1583 (size=21) 2024-12-01T17:11:25,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742407_1583 (size=21) 2024-12-01T17:11:25,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742408_1584 (size=304253) 2024-12-01T17:11:25,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742408_1584 (size=304253) 2024-12-01T17:11:25,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742408_1584 (size=304253) 2024-12-01T17:11:26,188 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-01T17:11:26,188 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-01T17:11:26,191 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0010_000001 (auth:SIMPLE) from 127.0.0.1:60518 2024-12-01T17:11:26,200 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0010/container_1733072744857_0010_01_000001/launch_container.sh] 2024-12-01T17:11:26,200 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0010/container_1733072744857_0010_01_000001/container_tokens] 2024-12-01T17:11:26,200 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_1/usercache/jenkins/appcache/application_1733072744857_0010/container_1733072744857_0010_01_000001/sysfs] 2024-12-01T17:11:27,143 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0011_000001 (auth:SIMPLE) from 127.0.0.1:48242 2024-12-01T17:11:27,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:27,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-01T17:11:27,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-01T17:11:27,876 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:11:30,281 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ced0bf6724c9afd6d078a6aaf8bb47a9, had cached 0 bytes from a total of 5216 2024-12-01T17:11:30,281 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 13964bd13cf62ff2c1f3f282d989070a, had cached 0 bytes from a total of 8394 2024-12-01T17:11:32,834 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:11:35,413 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0011_000001 (auth:SIMPLE) from 127.0.0.1:48896 2024-12-01T17:11:35,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742409_1585 (size=349975) 
2024-12-01T17:11:35,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742409_1585 (size=349975) 2024-12-01T17:11:35,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742409_1585 (size=349975) 2024-12-01T17:11:35,860 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T17:11:37,604 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0011_000001 (auth:SIMPLE) from 127.0.0.1:50424 2024-12-01T17:11:37,604 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0011_000001 (auth:SIMPLE) from 127.0.0.1:47218 2024-12-01T17:11:40,077 WARN [regionserver/b87883a7882d:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 1, running: 1 2024-12-01T17:11:40,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742410_1586 (size=8394) 2024-12-01T17:11:40,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742410_1586 (size=8394) 2024-12-01T17:11:40,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742410_1586 (size=8394) 2024-12-01T17:11:40,664 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 43379 2024-12-01T17:11:40,787 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0011/container_1733072744857_0011_01_000002/launch_container.sh] 2024-12-01T17:11:40,787 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0011/container_1733072744857_0011_01_000002/container_tokens] 2024-12-01T17:11:40,787 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_0/usercache/jenkins/appcache/application_1733072744857_0011/container_1733072744857_0011_01_000002/sysfs] 2024-12-01T17:11:41,110 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region f3fe92582664bdd6f8db08468f9fd6ce changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:11:41,111 DEBUG [master/b87883a7882d:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 051bf283f1b7f14edbd2eaee072c1718 changed from -1.0 to 0.0, refreshing cache 2024-12-01T17:11:41,702 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742412_1588 (size=5216) 2024-12-01T17:11:41,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742412_1588 (size=5216) 2024-12-01T17:11:41,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742412_1588 (size=5216) 2024-12-01T17:11:41,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742411_1587 (size=22208) 2024-12-01T17:11:41,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742411_1587 (size=22208) 2024-12-01T17:11:41,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742411_1587 (size=22208) 2024-12-01T17:11:41,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742413_1589 (size=476) 2024-12-01T17:11:41,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742413_1589 (size=476) 2024-12-01T17:11:41,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742413_1589 (size=476) 2024-12-01T17:11:41,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742414_1590 (size=22208) 2024-12-01T17:11:41,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742414_1590 (size=22208) 2024-12-01T17:11:41,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742414_1590 (size=22208) 2024-12-01T17:11:41,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742415_1591 (size=349975) 2024-12-01T17:11:41,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742415_1591 (size=349975) 2024-12-01T17:11:41,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742415_1591 (size=349975) 2024-12-01T17:11:41,819 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733072744857_0011_000001 (auth:SIMPLE) from 127.0.0.1:47220 2024-12-01T17:11:41,832 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733072744857_0011_01_000003 is : 143 2024-12-01T17:11:41,841 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0011/container_1733072744857_0011_01_000003/launch_container.sh] 2024-12-01T17:11:41,841 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0011/container_1733072744857_0011_01_000003/container_tokens] 2024-12-01T17:11:41,841 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-0_3/usercache/jenkins/appcache/application_1733072744857_0011/container_1733072744857_0011_01_000003/sysfs] 2024-12-01T17:11:43,045 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-01T17:11:43,045 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-01T17:11:43,049 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,050 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-01T17:11:43,050 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-01T17:11:43,050 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,050 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-01T17:11:43,050 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-01T17:11:43,050 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-69539515_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073084040/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073084040/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,051 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073084040/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-01T17:11:43,051 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/export-test/export-1733073084040/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-01T17:11:43,055 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$13(2820): 
Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-01T17:11:43,058 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073103058"}]},"ts":"1733073103058"} 2024-12-01T17:11:43,060 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-01T17:11:43,060 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-01T17:11:43,061 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-01T17:11:43,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f3fe92582664bdd6f8db08468f9fd6ce, UNASSIGN}, {pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=051bf283f1b7f14edbd2eaee072c1718, UNASSIGN}] 2024-12-01T17:11:43,063 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=051bf283f1b7f14edbd2eaee072c1718, UNASSIGN 2024-12-01T17:11:43,063 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f3fe92582664bdd6f8db08468f9fd6ce, UNASSIGN 2024-12-01T17:11:43,064 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=f3fe92582664bdd6f8db08468f9fd6ce, regionState=CLOSING, regionLocation=b87883a7882d,36521,1733072737896 2024-12-01T17:11:43,064 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=051bf283f1b7f14edbd2eaee072c1718, regionState=CLOSING, regionLocation=b87883a7882d,44753,1733072737741 2024-12-01T17:11:43,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f3fe92582664bdd6f8db08468f9fd6ce, UNASSIGN because future has completed 2024-12-01T17:11:43,065 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:11:43,066 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce, server=b87883a7882d,36521,1733072737896}] 2024-12-01T17:11:43,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=051bf283f1b7f14edbd2eaee072c1718, UNASSIGN because future has completed 2024-12-01T17:11:43,066 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T17:11:43,066 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure 051bf283f1b7f14edbd2eaee072c1718, server=b87883a7882d,44753,1733072737741}] 2024-12-01T17:11:43,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-01T17:11:43,218 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(122): Close f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:43,218 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:11:43,219 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1722): Closing f3fe92582664bdd6f8db08468f9fd6ce, disabling compactions & flushes 2024-12-01T17:11:43,219 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 2024-12-01T17:11:43,219 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 2024-12-01T17:11:43,219 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. after waiting 0 ms 2024-12-01T17:11:43,219 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 
2024-12-01T17:11:43,219 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(122): Close 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:43,219 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T17:11:43,219 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1722): Closing 051bf283f1b7f14edbd2eaee072c1718, disabling compactions & flushes 2024-12-01T17:11:43,219 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 2024-12-01T17:11:43,219 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 2024-12-01T17:11:43,219 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. after waiting 0 ms 2024-12-01T17:11:43,219 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 2024-12-01T17:11:43,237 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:11:43,237 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-01T17:11:43,238 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:11:43,238 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:11:43,238 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718. 2024-12-01T17:11:43,238 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce. 
2024-12-01T17:11:43,238 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1676): Region close journal for 051bf283f1b7f14edbd2eaee072c1718: Waiting for close lock at 1733073103219Running coprocessor pre-close hooks at 1733073103219Disabling compacts and flushes for region at 1733073103219Disabling writes for close at 1733073103219Writing region close event to WAL at 1733073103228 (+9 ms)Running coprocessor post-close hooks at 1733073103237 (+9 ms)Closed at 1733073103238 (+1 ms) 2024-12-01T17:11:43,238 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1676): Region close journal for f3fe92582664bdd6f8db08468f9fd6ce: Waiting for close lock at 1733073103219Running coprocessor pre-close hooks at 1733073103219Disabling compacts and flushes for region at 1733073103219Disabling writes for close at 1733073103219Writing region close event to WAL at 1733073103228 (+9 ms)Running coprocessor post-close hooks at 1733073103238 (+10 ms)Closed at 1733073103238 2024-12-01T17:11:43,240 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(157): Closed 051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:43,240 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=051bf283f1b7f14edbd2eaee072c1718, regionState=CLOSED 2024-12-01T17:11:43,242 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(157): Closed f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:43,242 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=f3fe92582664bdd6f8db08468f9fd6ce, regionState=CLOSED 2024-12-01T17:11:43,244 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure 051bf283f1b7f14edbd2eaee072c1718, server=b87883a7882d,44753,1733072737741 because future has completed 2024-12-01T17:11:43,245 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=249, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce, server=b87883a7882d,36521,1733072737896 because future has completed 2024-12-01T17:11:43,247 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=250, resume processing ppid=248 2024-12-01T17:11:43,247 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=248, state=SUCCESS, hasLock=false; CloseRegionProcedure 051bf283f1b7f14edbd2eaee072c1718, server=b87883a7882d,44753,1733072737741 in 179 msec 2024-12-01T17:11:43,248 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=247 2024-12-01T17:11:43,248 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=247, state=SUCCESS, hasLock=false; CloseRegionProcedure f3fe92582664bdd6f8db08468f9fd6ce, server=b87883a7882d,36521,1733072737896 in 181 msec 2024-12-01T17:11:43,248 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=051bf283f1b7f14edbd2eaee072c1718, UNASSIGN in 185 msec 2024-12-01T17:11:43,250 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=247, resume processing ppid=246 2024-12-01T17:11:43,250 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f3fe92582664bdd6f8db08468f9fd6ce, UNASSIGN in 186 msec 2024-12-01T17:11:43,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-12-01T17:11:43,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 189 msec 2024-12-01T17:11:43,253 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733073103253"}]},"ts":"1733073103253"} 2024-12-01T17:11:43,255 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-01T17:11:43,255 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-01T17:11:43,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 201 msec 2024-12-01T17:11:43,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-01T17:11:43,380 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-01T17:11:43,381 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] procedure2.ProcedureExecutor(1139): Stored pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,383 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,384 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=251, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,386 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36521 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,388 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:43,388 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:43,389 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/recovered.edits] 2024-12-01T17:11:43,389 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/cf, FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/recovered.edits] 2024-12-01T17:11:43,392 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/cf/2b5574e44ba9464291fa4da2ffe5bb8c to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/cf/2b5574e44ba9464291fa4da2ffe5bb8c 2024-12-01T17:11:43,392 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/cf/07687b8cf8fd41b795a85e3bfce43ff3 to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/cf/07687b8cf8fd41b795a85e3bfce43ff3 2024-12-01T17:11:43,394 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce/recovered.edits/9.seqid 2024-12-01T17:11:43,394 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/recovered.edits/9.seqid to hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718/recovered.edits/9.seqid 2024-12-01T17:11:43,394 DEBUG [HFileArchiver-26 {}] 
backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/f3fe92582664bdd6f8db08468f9fd6ce 2024-12-01T17:11:43,394 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testtb-testExportFileSystemStateWithSkipTmp/051bf283f1b7f14edbd2eaee072c1718 2024-12-01T17:11:43,394 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-01T17:11:43,396 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=251, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,399 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-01T17:11:43,401 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-01T17:11:43,402 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=251, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,402 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-01T17:11:43,402 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733073103402"}]},"ts":"9223372036854775807"} 2024-12-01T17:11:43,402 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733073103402"}]},"ts":"9223372036854775807"} 2024-12-01T17:11:43,405 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-01T17:11:43,405 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => f3fe92582664bdd6f8db08468f9fd6ce, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733073082674.f3fe92582664bdd6f8db08468f9fd6ce.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 051bf283f1b7f14edbd2eaee072c1718, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718.', STARTKEY => '1', ENDKEY => ''}] 2024-12-01T17:11:43,405 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
2024-12-01T17:11:43,405 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733073103405"}]},"ts":"9223372036854775807"} 2024-12-01T17:11:43,408 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-01T17:11:43,409 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=251, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,411 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 28 msec 2024-12-01T17:11:43,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,457 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-01T17:11:43,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-01T17:11:43,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-01T17:11:43,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-01T17:11:43,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:11:43,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:11:43,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:11:43,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-01T17:11:43,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=251 2024-12-01T17:11:43,468 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,468 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-01T17:11:43,469 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:43,469 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:43,469 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:43,469 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-01T17:11:43,475 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-01T17:11:43,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: 
emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,478 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-01T17:11:43,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:43,506 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=826 (was 816) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:56560 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1346006544_1 at /127.0.0.1:55848 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1396781562) connection to localhost/127.0.0.1:33425 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-8943 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1346006544_1 at /127.0.0.1:51908 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-669833187_22 at /127.0.0.1:47362 [Waiting for operation #4] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-669833187_22 at /127.0.0.1:47348 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33425 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-69539515_22 at /127.0.0.1:45858 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 156321) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=815 (was 823), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=719 (was 718) - SystemLoadAverage LEAK? -, ProcessCount=26 (was 25) - ProcessCount LEAK? -, AvailableMemoryMB=2310 (was 2570) 2024-12-01T17:11:43,507 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=826 is superior to 500 2024-12-01T17:11:43,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-12-01T17:11:43,514 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@644929b8{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-01T17:11:43,517 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@331c97f2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T17:11:43,517 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T17:11:43,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b3a1d64{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-01T17:11:43,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c37be72{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,STOPPED} 2024-12-01T17:11:43,532 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733072744857_0011_01_000001 is : 143 2024-12-01T17:11:43,542 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0011/container_1733072744857_0011_01_000001/launch_container.sh] 2024-12-01T17:11:43,542 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0011/container_1733072744857_0011_01_000001/container_tokens] 2024-12-01T17:11:43,542 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_1957686627/yarn-6742040670/MiniMRCluster_1957686627-localDir-nm-1_2/usercache/jenkins/appcache/application_1733072744857_0011/container_1733072744857_0011_01_000001/sysfs] 2024-12-01T17:11:47,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-01T17:11:48,764 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:12:00,534 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b3cec68{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-01T17:12:00,535 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d80ab35{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T17:12:00,535 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T17:12:00,535 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e64c225{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-01T17:12:00,535 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@181c5184{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,STOPPED} 2024-12-01T17:12:05,860 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-01T17:12:15,281 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ced0bf6724c9afd6d078a6aaf8bb47a9, had cached 0 bytes from a total of 5216 2024-12-01T17:12:15,281 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 13964bd13cf62ff2c1f3f282d989070a, had cached 0 bytes from a total of 8394 2024-12-01T17:12:17,552 ERROR [Thread[Thread-404,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-01T17:12:17,552 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3523d319{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-01T17:12:17,553 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7872cd00{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T17:12:17,553 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T17:12:17,553 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4aaae963{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-01T17:12:17,553 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@634dba7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,STOPPED} 2024-12-01T17:12:17,555 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-12-01T17:12:17,560 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-01T17:12:17,560 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-01T17:12:17,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741830_1006 (size=1166464) 2024-12-01T17:12:17,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741830_1006 (size=1166464) 2024-12-01T17:12:17,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741830_1006 (size=1166464) 2024-12-01T17:12:17,567 ERROR [Thread[Thread-427,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-01T17:12:17,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e9879ba{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-01T17:12:17,570 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5dbf86c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T17:12:17,570 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T17:12:17,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d805368{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-01T17:12:17,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e01e198{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,STOPPED} 2024-12-01T17:12:17,572 ERROR [Thread[Thread-386,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-01T17:12:17,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-01T17:12:17,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-01T17:12:17,572 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-01T17:12:17,572 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T17:12:17,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:12:17,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:12:17,572 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T17:12:17,572 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-01T17:12:17,572 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=603790073, stopped=false 2024-12-01T17:12:17,573 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:12:17,573 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-01T17:12:17,573 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b87883a7882d,38737,1733072736902 2024-12-01T17:12:17,697 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T17:12:17,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T17:12:17,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T17:12:17,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T17:12:17,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T17:12:17,698 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-01T17:12:17,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:12:17,698 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T17:12:17,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:12:17,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:12:17,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:12:17,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-01T17:12:17,700 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b87883a7882d,44753,1733072737741' ***** 2024-12-01T17:12:17,700 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T17:12:17,701 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T17:12:17,701 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:12:17,701 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T17:12:17,701 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b87883a7882d,36521,1733072737896' ***** 2024-12-01T17:12:17,701 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:12:17,701 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T17:12:17,701 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b87883a7882d,41375,1733072737967' ***** 2024-12-01T17:12:17,701 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:12:17,701 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T17:12:17,702 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T17:12:17,702 INFO [RS:0;b87883a7882d:44753 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T17:12:17,702 INFO [RS:1;b87883a7882d:36521 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T17:12:17,702 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T17:12:17,702 INFO [RS:2;b87883a7882d:41375 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T17:12:17,702 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T17:12:17,702 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T17:12:17,702 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T17:12:17,702 INFO [RS:2;b87883a7882d:41375 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T17:12:17,702 INFO [RS:0;b87883a7882d:44753 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T17:12:17,702 INFO [RS:2;b87883a7882d:41375 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T17:12:17,702 INFO [RS:0;b87883a7882d:44753 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-01T17:12:17,703 INFO [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(959): stopping server b87883a7882d,44753,1733072737741 2024-12-01T17:12:17,703 INFO [RS:1;b87883a7882d:36521 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T17:12:17,703 INFO [RS:1;b87883a7882d:36521 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T17:12:17,703 INFO [RS:0;b87883a7882d:44753 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T17:12:17,703 INFO [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(3091): Received CLOSE for 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:12:17,703 INFO [RS:0;b87883a7882d:44753 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b87883a7882d:44753. 2024-12-01T17:12:17,703 INFO [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(3091): Received CLOSE for 5b10cafeb6a61c25ce1e97f8b1c04b89 2024-12-01T17:12:17,703 DEBUG [RS:0;b87883a7882d:44753 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T17:12:17,703 DEBUG [RS:0;b87883a7882d:44753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:12:17,703 INFO [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(959): stopping server b87883a7882d,41375,1733072737967 2024-12-01T17:12:17,703 INFO [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(3091): Received CLOSE for ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:12:17,703 INFO [RS:2;b87883a7882d:41375 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T17:12:17,703 INFO [RS:0;b87883a7882d:44753 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T17:12:17,703 INFO [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(959): stopping server b87883a7882d,36521,1733072737896 2024-12-01T17:12:17,703 INFO [RS:1;b87883a7882d:36521 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T17:12:17,703 INFO [RS:0;b87883a7882d:44753 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-01T17:12:17,703 INFO [RS:0;b87883a7882d:44753 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T17:12:17,703 INFO [RS:2;b87883a7882d:41375 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;b87883a7882d:41375. 2024-12-01T17:12:17,703 INFO [RS:1;b87883a7882d:36521 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;b87883a7882d:36521. 2024-12-01T17:12:17,703 DEBUG [RS:1;b87883a7882d:36521 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T17:12:17,703 INFO [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-01T17:12:17,703 DEBUG [RS:1;b87883a7882d:36521 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:12:17,703 DEBUG [RS:2;b87883a7882d:41375 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T17:12:17,703 DEBUG [RS:2;b87883a7882d:41375 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:12:17,704 INFO [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-01T17:12:17,704 INFO [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-01T17:12:17,704 INFO [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-01T17:12:17,704 DEBUG [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(1325): Online Regions={5b10cafeb6a61c25ce1e97f8b1c04b89=hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., ced0bf6724c9afd6d078a6aaf8bb47a9=testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9.} 2024-12-01T17:12:17,704 DEBUG [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(1325): Online Regions={13964bd13cf62ff2c1f3f282d989070a=testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a.} 2024-12-01T17:12:17,704 DEBUG [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-01T17:12:17,704 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 13964bd13cf62ff2c1f3f282d989070a, disabling compactions & flushes 2024-12-01T17:12:17,704 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 2024-12-01T17:12:17,704 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 2024-12-01T17:12:17,704 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. after waiting 0 ms 2024-12-01T17:12:17,704 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 
2024-12-01T17:12:17,704 DEBUG [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(1351): Waiting on 5b10cafeb6a61c25ce1e97f8b1c04b89, ced0bf6724c9afd6d078a6aaf8bb47a9 2024-12-01T17:12:17,704 DEBUG [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(1351): Waiting on 13964bd13cf62ff2c1f3f282d989070a 2024-12-01T17:12:17,704 DEBUG [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-01T17:12:17,704 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T17:12:17,704 INFO [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T17:12:17,704 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T17:12:17,704 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5b10cafeb6a61c25ce1e97f8b1c04b89, disabling compactions & flushes 2024-12-01T17:12:17,704 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T17:12:17,704 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. 2024-12-01T17:12:17,704 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T17:12:17,704 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. 2024-12-01T17:12:17,704 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. after waiting 0 ms 2024-12-01T17:12:17,704 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. 
2024-12-01T17:12:17,704 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 5b10cafeb6a61c25ce1e97f8b1c04b89 1/1 column families, dataSize=1.65 KB heapSize=3.90 KB 2024-12-01T17:12:17,704 INFO [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=8.76 KB heapSize=14.76 KB 2024-12-01T17:12:17,707 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/13964bd13cf62ff2c1f3f282d989070a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-01T17:12:17,707 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/info/8c8893c6bfd24563b16332496c48d91b is 121, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718./info:/1733073103402/DeleteFamily/seqid=0 2024-12-01T17:12:17,707 INFO [regionserver/b87883a7882d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T17:12:17,707 INFO [regionserver/b87883a7882d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T17:12:17,711 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:12:17,711 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 2024-12-01T17:12:17,711 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 13964bd13cf62ff2c1f3f282d989070a: Waiting for close lock at 1733073137703Running coprocessor pre-close hooks at 1733073137704 (+1 ms)Disabling compacts and flushes for region at 1733073137704Disabling writes for close at 1733073137704Writing region close event to WAL at 1733073137705 (+1 ms)Running coprocessor post-close hooks at 1733073137711 (+6 ms)Closed at 1733073137711 2024-12-01T17:12:17,712 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733072999934.13964bd13cf62ff2c1f3f282d989070a. 
2024-12-01T17:12:17,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742416_1592 (size=6387) 2024-12-01T17:12:17,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742416_1592 (size=6387) 2024-12-01T17:12:17,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742416_1592 (size=6387) 2024-12-01T17:12:17,718 INFO [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.91 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/info/8c8893c6bfd24563b16332496c48d91b 2024-12-01T17:12:17,719 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/acl/5b10cafeb6a61c25ce1e97f8b1c04b89/.tmp/l/4987505d44264942bdefd7a122df111a is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733072998084/DeleteFamily/seqid=0 2024-12-01T17:12:17,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742417_1593 (size=5860) 2024-12-01T17:12:17,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742417_1593 (size=5860) 2024-12-01T17:12:17,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742417_1593 (size=5860) 2024-12-01T17:12:17,723 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=31 (bloomFilter=false), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/acl/5b10cafeb6a61c25ce1e97f8b1c04b89/.tmp/l/4987505d44264942bdefd7a122df111a 2024-12-01T17:12:17,725 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/ns/0f89c85647c44b8a927d5a255e9774be is 119, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718./ns:/1733073103396/DeleteFamily/seqid=0 2024-12-01T17:12:17,727 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4987505d44264942bdefd7a122df111a 2024-12-01T17:12:17,728 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/acl/5b10cafeb6a61c25ce1e97f8b1c04b89/.tmp/l/4987505d44264942bdefd7a122df111a as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/acl/5b10cafeb6a61c25ce1e97f8b1c04b89/l/4987505d44264942bdefd7a122df111a 2024-12-01T17:12:17,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36379 is added to blk_1073742418_1594 (size=5927) 2024-12-01T17:12:17,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742418_1594 (size=5927) 2024-12-01T17:12:17,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742418_1594 (size=5927) 2024-12-01T17:12:17,730 INFO [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=430 B at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/ns/0f89c85647c44b8a927d5a255e9774be 2024-12-01T17:12:17,732 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4987505d44264942bdefd7a122df111a 2024-12-01T17:12:17,732 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/acl/5b10cafeb6a61c25ce1e97f8b1c04b89/l/4987505d44264942bdefd7a122df111a, entries=14, sequenceid=31, filesize=5.7 K 2024-12-01T17:12:17,733 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 5b10cafeb6a61c25ce1e97f8b1c04b89 in 29ms, sequenceid=31, compaction requested=false 2024-12-01T17:12:17,736 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/acl/5b10cafeb6a61c25ce1e97f8b1c04b89/recovered.edits/34.seqid, newMaxSeqId=34, maxSeqId=1 2024-12-01T17:12:17,736 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/rep_barrier/bc245af4a02a434c8074ece0135ec5b9 is 128, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718./rep_barrier:/1733073103396/DeleteFamily/seqid=0 2024-12-01T17:12:17,736 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:12:17,736 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. 
2024-12-01T17:12:17,736 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5b10cafeb6a61c25ce1e97f8b1c04b89: Waiting for close lock at 1733073137704Running coprocessor pre-close hooks at 1733073137704Disabling compacts and flushes for region at 1733073137704Disabling writes for close at 1733073137704Obtaining lock to block concurrent updates at 1733073137704Preparing flush snapshotting stores in 5b10cafeb6a61c25ce1e97f8b1c04b89 at 1733073137704Finished memstore snapshotting hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89., syncing WAL and waiting on mvcc, flushsize=dataSize=1694, getHeapSize=3976, getOffHeapSize=0, getCellsCount=27 at 1733073137704Flushing stores of hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. at 1733073137705 (+1 ms)Flushing 5b10cafeb6a61c25ce1e97f8b1c04b89/l: creating writer at 1733073137705Flushing 5b10cafeb6a61c25ce1e97f8b1c04b89/l: appending metadata at 1733073137718 (+13 ms)Flushing 5b10cafeb6a61c25ce1e97f8b1c04b89/l: closing flushed file at 1733073137718Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1645dca: reopening flushed file at 1733073137727 (+9 ms)Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 5b10cafeb6a61c25ce1e97f8b1c04b89 in 29ms, sequenceid=31, compaction requested=false at 1733073137733 (+6 ms)Writing region close event to WAL at 1733073137734 (+1 ms)Running coprocessor post-close hooks at 1733073137736 (+2 ms)Closed at 1733073137736 2024-12-01T17:12:17,736 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733072741186.5b10cafeb6a61c25ce1e97f8b1c04b89. 2024-12-01T17:12:17,737 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ced0bf6724c9afd6d078a6aaf8bb47a9, disabling compactions & flushes 2024-12-01T17:12:17,737 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 2024-12-01T17:12:17,737 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 2024-12-01T17:12:17,737 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. after waiting 0 ms 2024-12-01T17:12:17,737 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 
2024-12-01T17:12:17,740 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/default/testExportExpiredSnapshot/ced0bf6724c9afd6d078a6aaf8bb47a9/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-01T17:12:17,740 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:12:17,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742419_1595 (size=5990) 2024-12-01T17:12:17,740 INFO [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 2024-12-01T17:12:17,740 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ced0bf6724c9afd6d078a6aaf8bb47a9: Waiting for close lock at 1733073137737Running coprocessor pre-close hooks at 1733073137737Disabling compacts and flushes for region at 1733073137737Disabling writes for close at 1733073137737Writing region close event to WAL at 1733073137737Running coprocessor post-close hooks at 1733073137740 (+3 ms)Closed at 1733073137740 2024-12-01T17:12:17,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742419_1595 (size=5990) 2024-12-01T17:12:17,741 DEBUG [RS_CLOSE_REGION-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733072999934.ced0bf6724c9afd6d078a6aaf8bb47a9. 
2024-12-01T17:12:17,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742419_1595 (size=5990) 2024-12-01T17:12:17,741 INFO [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=466 B at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/rep_barrier/bc245af4a02a434c8074ece0135ec5b9 2024-12-01T17:12:17,746 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/table/893cd1087e8b459a8daee129f07d9ad1 is 122, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733073082674.051bf283f1b7f14edbd2eaee072c1718./table:/1733073103396/DeleteFamily/seqid=0 2024-12-01T17:12:17,750 INFO [regionserver/b87883a7882d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T17:12:17,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742420_1596 (size=6012) 2024-12-01T17:12:17,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742420_1596 (size=6012) 2024-12-01T17:12:17,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742420_1596 (size=6012) 2024-12-01T17:12:17,750 INFO [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=996 B at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/table/893cd1087e8b459a8daee129f07d9ad1 2024-12-01T17:12:17,754 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/info/8c8893c6bfd24563b16332496c48d91b as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/info/8c8893c6bfd24563b16332496c48d91b 2024-12-01T17:12:17,758 INFO [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/info/8c8893c6bfd24563b16332496c48d91b, entries=8, sequenceid=243, filesize=6.2 K 2024-12-01T17:12:17,758 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/ns/0f89c85647c44b8a927d5a255e9774be as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/ns/0f89c85647c44b8a927d5a255e9774be 2024-12-01T17:12:17,762 INFO [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/ns/0f89c85647c44b8a927d5a255e9774be, entries=4, sequenceid=243, filesize=5.8 K 2024-12-01T17:12:17,763 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/rep_barrier/bc245af4a02a434c8074ece0135ec5b9 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/rep_barrier/bc245af4a02a434c8074ece0135ec5b9 2024-12-01T17:12:17,766 INFO [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/rep_barrier/bc245af4a02a434c8074ece0135ec5b9, entries=4, sequenceid=243, filesize=5.8 K 2024-12-01T17:12:17,767 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/.tmp/table/893cd1087e8b459a8daee129f07d9ad1 as hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/table/893cd1087e8b459a8daee129f07d9ad1 2024-12-01T17:12:17,771 INFO [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/table/893cd1087e8b459a8daee129f07d9ad1, entries=6, sequenceid=243, filesize=5.9 K 2024-12-01T17:12:17,772 INFO [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~8.76 KB/8966, heapSize ~14.70 KB/15048, currentSize=0 B/0 for 1588230740 in 67ms, sequenceid=243, compaction requested=false 2024-12-01T17:12:17,775 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/data/hbase/meta/1588230740/recovered.edits/246.seqid, newMaxSeqId=246, maxSeqId=1 2024-12-01T17:12:17,776 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:12:17,776 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T17:12:17,776 INFO [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T17:12:17,776 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733073137704Running coprocessor pre-close hooks at 1733073137704Disabling compacts and flushes for region at 1733073137704Disabling writes for close at 1733073137704Obtaining lock to block concurrent updates at 1733073137704Preparing flush snapshotting stores in 1588230740 at 1733073137704Finished memstore snapshotting hbase:meta,,1.1588230740, syncing 
WAL and waiting on mvcc, flushsize=dataSize=8966, getHeapSize=15048, getOffHeapSize=0, getCellsCount=68 at 1733073137704Flushing stores of hbase:meta,,1.1588230740 at 1733073137705 (+1 ms)Flushing 1588230740/info: creating writer at 1733073137705Flushing 1588230740/info: appending metadata at 1733073137707 (+2 ms)Flushing 1588230740/info: closing flushed file at 1733073137707Flushing 1588230740/ns: creating writer at 1733073137722 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733073137724 (+2 ms)Flushing 1588230740/ns: closing flushed file at 1733073137724Flushing 1588230740/rep_barrier: creating writer at 1733073137734 (+10 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733073137736 (+2 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733073137736Flushing 1588230740/table: creating writer at 1733073137744 (+8 ms)Flushing 1588230740/table: appending metadata at 1733073137746 (+2 ms)Flushing 1588230740/table: closing flushed file at 1733073137746Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33f19fbf: reopening flushed file at 1733073137754 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6947c19f: reopening flushed file at 1733073137758 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a936809: reopening flushed file at 1733073137762 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59a1eea4: reopening flushed file at 1733073137766 (+4 ms)Finished flush of dataSize ~8.76 KB/8966, heapSize ~14.70 KB/15048, currentSize=0 B/0 for 1588230740 in 67ms, sequenceid=243, compaction requested=false at 1733073137772 (+6 ms)Writing region close event to WAL at 1733073137773 (+1 ms)Running coprocessor post-close hooks at 1733073137776 (+3 ms)Closed at 1733073137776 2024-12-01T17:12:17,776 DEBUG [RS_CLOSE_META-regionserver/b87883a7882d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-01T17:12:17,904 INFO [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(976): stopping server b87883a7882d,44753,1733072737741; all regions closed. 2024-12-01T17:12:17,904 INFO [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(976): stopping server b87883a7882d,41375,1733072737967; all regions closed. 2024-12-01T17:12:17,904 INFO [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(976): stopping server b87883a7882d,36521,1733072737896; all regions closed. 
2024-12-01T17:12:17,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741836_1012 (size=102506) 2024-12-01T17:12:17,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741836_1012 (size=102506) 2024-12-01T17:12:17,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741836_1012 (size=102506) 2024-12-01T17:12:17,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741833_1009 (size=19839) 2024-12-01T17:12:17,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741833_1009 (size=19839) 2024-12-01T17:12:17,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741833_1009 (size=19839) 2024-12-01T17:12:17,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741835_1011 (size=8890) 2024-12-01T17:12:17,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741835_1011 (size=8890) 2024-12-01T17:12:17,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741835_1011 (size=8890) 2024-12-01T17:12:17,915 DEBUG [RS:1;b87883a7882d:36521 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/oldWALs 2024-12-01T17:12:17,915 DEBUG [RS:0;b87883a7882d:44753 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/oldWALs 2024-12-01T17:12:17,915 INFO [RS:0;b87883a7882d:44753 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b87883a7882d%2C44753%2C1733072737741.meta:.meta(num 1733072740606) 2024-12-01T17:12:17,915 INFO [RS:1;b87883a7882d:36521 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b87883a7882d%2C36521%2C1733072737896:(num 1733072740223) 2024-12-01T17:12:17,915 DEBUG [RS:1;b87883a7882d:36521 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:12:17,915 INFO [RS:1;b87883a7882d:36521 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T17:12:17,916 DEBUG [RS:2;b87883a7882d:41375 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/oldWALs 2024-12-01T17:12:17,916 INFO [RS:2;b87883a7882d:41375 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b87883a7882d%2C41375%2C1733072737967:(num 1733072740223) 2024-12-01T17:12:17,916 INFO [RS:1;b87883a7882d:36521 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T17:12:17,916 DEBUG [RS:2;b87883a7882d:41375 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:12:17,916 INFO [RS:2;b87883a7882d:41375 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T17:12:17,916 INFO [RS:2;b87883a7882d:41375 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T17:12:17,916 INFO [RS:1;b87883a7882d:36521 {}] hbase.ChoreService(370): Chore service for: regionserver/b87883a7882d:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, 
period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T17:12:17,916 INFO [RS:2;b87883a7882d:41375 {}] hbase.ChoreService(370): Chore service for: regionserver/b87883a7882d:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-01T17:12:17,916 INFO [RS:2;b87883a7882d:41375 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T17:12:17,916 INFO [RS:1;b87883a7882d:36521 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T17:12:17,916 INFO [RS:2;b87883a7882d:41375 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T17:12:17,916 INFO [RS:1;b87883a7882d:36521 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T17:12:17,916 INFO [RS:2;b87883a7882d:41375 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T17:12:17,916 INFO [RS:1;b87883a7882d:36521 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T17:12:17,916 INFO [RS:1;b87883a7882d:36521 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T17:12:17,916 INFO [RS:2;b87883a7882d:41375 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T17:12:17,916 INFO [regionserver/b87883a7882d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T17:12:17,916 INFO [regionserver/b87883a7882d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-01T17:12:17,916 INFO [RS:2;b87883a7882d:41375 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41375 2024-12-01T17:12:17,916 INFO [RS:1;b87883a7882d:36521 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36521 2024-12-01T17:12:17,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741834_1010 (size=15065) 2024-12-01T17:12:17,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073741834_1010 (size=15065) 2024-12-01T17:12:17,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741834_1010 (size=15065) 2024-12-01T17:12:17,921 DEBUG [RS:0;b87883a7882d:44753 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/oldWALs 2024-12-01T17:12:17,921 INFO [RS:0;b87883a7882d:44753 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b87883a7882d%2C44753%2C1733072737741:(num 1733072740223) 2024-12-01T17:12:17,921 DEBUG [RS:0;b87883a7882d:44753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T17:12:17,921 INFO [RS:0;b87883a7882d:44753 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T17:12:17,921 INFO [RS:0;b87883a7882d:44753 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T17:12:17,921 INFO [RS:0;b87883a7882d:44753 {}] hbase.ChoreService(370): Chore service for: regionserver/b87883a7882d:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T17:12:17,921 INFO [RS:0;b87883a7882d:44753 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T17:12:17,921 INFO [regionserver/b87883a7882d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-01T17:12:17,921 INFO [RS:0;b87883a7882d:44753 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44753 2024-12-01T17:12:17,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T17:12:17,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b87883a7882d,36521,1733072737896 2024-12-01T17:12:17,927 INFO [RS:1;b87883a7882d:36521 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T17:12:17,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b87883a7882d,41375,1733072737967 2024-12-01T17:12:17,938 INFO [RS:2;b87883a7882d:41375 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T17:12:17,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b87883a7882d,44753,1733072737741 2024-12-01T17:12:17,938 INFO [RS:0;b87883a7882d:44753 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T17:12:17,949 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b87883a7882d,44753,1733072737741] 2024-12-01T17:12:17,969 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b87883a7882d,44753,1733072737741 already deleted, retry=false 2024-12-01T17:12:17,970 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b87883a7882d,44753,1733072737741 expired; onlineServers=2 2024-12-01T17:12:17,970 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b87883a7882d,41375,1733072737967] 2024-12-01T17:12:17,980 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b87883a7882d,41375,1733072737967 already deleted, retry=false 2024-12-01T17:12:17,980 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b87883a7882d,41375,1733072737967 expired; onlineServers=1 2024-12-01T17:12:17,980 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b87883a7882d,36521,1733072737896] 2024-12-01T17:12:17,991 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b87883a7882d,36521,1733072737896 already deleted, retry=false 2024-12-01T17:12:17,991 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b87883a7882d,36521,1733072737896 expired; onlineServers=0 2024-12-01T17:12:17,991 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b87883a7882d,38737,1733072736902' ***** 2024-12-01T17:12:17,991 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T17:12:17,992 INFO [M:0;b87883a7882d:38737 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T17:12:17,992 INFO [M:0;b87883a7882d:38737 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T17:12:17,992 DEBUG [M:0;b87883a7882d:38737 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T17:12:17,992 DEBUG [M:0;b87883a7882d:38737 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T17:12:17,992 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-01T17:12:17,992 DEBUG [master/b87883a7882d:0:becomeActiveMaster-HFileCleaner.large.0-1733072739764 {}] cleaner.HFileCleaner(306): Exit Thread[master/b87883a7882d:0:becomeActiveMaster-HFileCleaner.large.0-1733072739764,5,FailOnTimeoutGroup] 2024-12-01T17:12:17,992 DEBUG [master/b87883a7882d:0:becomeActiveMaster-HFileCleaner.small.0-1733072739768 {}] cleaner.HFileCleaner(306): Exit Thread[master/b87883a7882d:0:becomeActiveMaster-HFileCleaner.small.0-1733072739768,5,FailOnTimeoutGroup] 2024-12-01T17:12:17,993 INFO [M:0;b87883a7882d:38737 {}] hbase.ChoreService(370): Chore service for: master/b87883a7882d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-01T17:12:17,993 INFO [M:0;b87883a7882d:38737 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T17:12:17,994 DEBUG [M:0;b87883a7882d:38737 {}] master.HMaster(1795): Stopping service threads 2024-12-01T17:12:17,994 INFO [M:0;b87883a7882d:38737 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T17:12:17,994 INFO [M:0;b87883a7882d:38737 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T17:12:17,995 INFO [M:0;b87883a7882d:38737 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T17:12:17,996 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-01T17:12:18,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T17:12:18,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T17:12:18,001 DEBUG [M:0;b87883a7882d:38737 {}] zookeeper.ZKUtil(347): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T17:12:18,001 WARN [M:0;b87883a7882d:38737 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T17:12:18,002 INFO [M:0;b87883a7882d:38737 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/.lastflushedseqids 2024-12-01T17:12:18,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073742421_1597 (size=325) 2024-12-01T17:12:18,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44603 is added to blk_1073742421_1597 (size=325) 2024-12-01T17:12:18,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073742421_1597 (size=325) 2024-12-01T17:12:18,015 INFO [M:0;b87883a7882d:38737 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-01T17:12:18,015 INFO [M:0;b87883a7882d:38737 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T17:12:18,015 DEBUG [M:0;b87883a7882d:38737 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T17:12:18,026 INFO [M:0;b87883a7882d:38737 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T17:12:18,026 DEBUG [M:0;b87883a7882d:38737 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T17:12:18,026 DEBUG [M:0;b87883a7882d:38737 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T17:12:18,026 DEBUG [M:0;b87883a7882d:38737 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T17:12:18,026 INFO [M:0;b87883a7882d:38737 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=991.95 KB heapSize=1.16 MB 2024-12-01T17:12:18,027 ERROR [AsyncFSWAL-0-hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData-prefix:b87883a7882d,38737,1733072736902 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData-prefix:b87883a7882d,38737,1733072736902,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T17:12:18,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T17:12:18,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36521-0x10191db4a6c0002, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T17:12:18,050 INFO [RS:1;b87883a7882d:36521 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T17:12:18,050 INFO [RS:1;b87883a7882d:36521 {}] regionserver.HRegionServer(1031): Exiting; stopping=b87883a7882d,36521,1733072737896; zookeeper connection closed. 
2024-12-01T17:12:18,050 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1769cafa {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1769cafa 2024-12-01T17:12:18,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T17:12:18,059 INFO [RS:0;b87883a7882d:44753 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T17:12:18,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T17:12:18,059 INFO [RS:2;b87883a7882d:41375 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T17:12:18,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41375-0x10191db4a6c0003, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T17:12:18,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44753-0x10191db4a6c0001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T17:12:18,059 INFO [RS:0;b87883a7882d:44753 {}] regionserver.HRegionServer(1031): Exiting; stopping=b87883a7882d,44753,1733072737741; zookeeper connection closed. 2024-12-01T17:12:18,059 INFO [RS:2;b87883a7882d:41375 {}] regionserver.HRegionServer(1031): Exiting; stopping=b87883a7882d,41375,1733072737967; zookeeper connection closed. 2024-12-01T17:12:18,060 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@44a1a286 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@44a1a286 2024-12-01T17:12:18,060 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@51d75996 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@51d75996 2024-12-01T17:12:18,061 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-01T17:12:23,234 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:12:27,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:12:27,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T17:12:27,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-01T17:12:27,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-01T17:12:27,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 
2024-12-01T17:12:27,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:12:27,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-01T17:12:27,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-01T17:12:32,834 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-01T17:12:35,861 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T17:13:05,861 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b87883a7882d:38737 234 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 24 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@5c8be0e0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 26 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27b4c4d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4693 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 48 Waiting on java.util.concurrent.CountDownLatch$Sync@24fd6ea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13106 Waited count: 13780 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@6909b09a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@689a2b67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1f9f0348): State: TIMED_WAITING Blocked count: 1 Waited count: 933 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 94 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp9797813-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp9797813-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp9797813-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp9797813-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp9797813-41-acceptor-0@6b1a259b-ServerConnector@39eef6d0{HTTP/1.1, (http/1.1)}{localhost:40833}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp9797813-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp9797813-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp9797813-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-7ee1d391-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 47 Waited count: 3190 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@212971b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44451): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 94 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@39923e56): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 157 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@35eaad8b): State: TIMED_WAITING Blocked count: 0 Waited count: 94 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 156 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 45898 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1440 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4777277e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44451): State: TIMED_WAITING Blocked count: 95 Waited count: 2475 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44451): State: TIMED_WAITING Blocked count: 106 Waited count: 2492 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44451): State: TIMED_WAITING Blocked count: 112 Waited count: 2481 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44451): State: TIMED_WAITING Blocked count: 107 Waited count: 2475 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44451): State: TIMED_WAITING Blocked count: 100 Waited count: 2482 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 
(pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3d8a563f): State: TIMED_WAITING Blocked count: 0 Waited count: 233 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@d1325d8): State: TIMED_WAITING Blocked count: 0 Waited count: 94 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@33c4416c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@69c9e3df): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(253478452)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1860921369-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1860921369-88-acceptor-0@221020f4-ServerConnector@451b769{HTTP/1.1, (http/1.1)}{localhost:43843}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1860921369-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1860921369-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-c4ee99d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@2d1969d0): State: TIMED_WAITING Blocked count: 0 Waited count: 930 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 45669): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 330 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c6547d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1467 Waited count: 1522 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e2c4b02): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 471 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 468 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 468 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1763905095-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1763905095-122-acceptor-0@4c1095b9-ServerConnector@2eec8e07{HTTP/1.1, (http/1.1)}{localhost:41601}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1763905095-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1763905095-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4d716bcd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (1396781562) connection to localhost/127.0.0.1:44451 from jenkins): State: TIMED_WAITING Blocked count: 1367 Waited count: 1367 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 0 Waited count: 2117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@238882f1): State: TIMED_WAITING Blocked count: 0 Waited count: 929 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 38135): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 316 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4450c098 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1448 Waited count: 1526 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@717de3e2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 472 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 490 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 481 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 469 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp170186225-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp170186225-154-acceptor-0@6ec10c9e-ServerConnector@19f9049{HTTP/1.1, (http/1.1)}{localhost:33229}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp170186225-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp170186225-156): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-11c75c94-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@33f6f20c): State: TIMED_WAITING Blocked count: 0 Waited count: 929 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41295): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 307 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69873f16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1405 Waited count: 1533 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7628a0f9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 484 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 488 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 203 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (java.util.concurrent.ThreadPoolExecutor$Worker@6f6a87af[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@37527041[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@201a5c2b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56318): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 232 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 350 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@125e2f8c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:56318):): State: WAITING Blocked count: 0 Waited count: 450 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33e52a59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 481 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57fd6504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 95 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 248 (LeaseRenewer:jenkins@localhost:44451): State: TIMED_WAITING Blocked count: 13 Waited count: 482 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@60852fa1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 320 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 24 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:56318)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 57 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46c1f91e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 7 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 23 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6734b74e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 95 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 
(NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e85642e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737): State: WAITING Blocked count: 130 Waited count: 573 Waiting on java.util.concurrent.Semaphore$NonfairSync@17245fd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737): State: WAITING Blocked count: 111 Waited count: 436 Waiting on java.util.concurrent.Semaphore$NonfairSync@5e644d21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38737): State: WAITING Blocked count: 89 Waited count: 8314 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c281f64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38737): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1989acdb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1989acdb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ce6baf7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5017e70f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@c980156 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@58239b6a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@418ba39b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 1 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 314 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 336 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 68 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;b87883a7882d:38737): State: TIMED_WAITING Blocked count: 12 Waited count: 4009 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1111/0x00007f9a20f97948.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 359 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (org.apache.hadoop.hdfs.PeerCache@28d4ea9c): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 384 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4591 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 401 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 62 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 76 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45803 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 460 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f0cd76 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 483 (regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69a0c9c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44f94ad9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 
(regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 24 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39e1bbbd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 511 (LeaseRenewer:jenkins.hfs.1@localhost:44451): State: TIMED_WAITING Blocked count: 16 Waited count: 480 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 512 (LeaseRenewer:jenkins.hfs.0@localhost:44451): State: TIMED_WAITING Blocked count: 17 Waited count: 481 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 (LeaseRenewer:jenkins.hfs.2@localhost:44451): State: TIMED_WAITING Blocked count: 15 Waited count: 481 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 
Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45662 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 564 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 542 Waiting on java.util.concurrent.ForkJoinPool@57fa42b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 566 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 763 Waiting on java.util.concurrent.ForkJoinPool@57fa42b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 580 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-1): State: WAITING Blocked count: 5 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-2): State: WAITING Blocked count: 3 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 997 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 795 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1057 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1096 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 116 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dd301d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1205 
(RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1206 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1254 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1258 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1618 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 47 Waiting on java.util.TaskQueue@f9eb9fa Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1980 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1981 (region-location-4): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2176 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2291 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 566 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 8525 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.ForkJoinPool@57fa42b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10272 (AsyncFSWAL-1-hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData-prefix:b87883a7882d,38737,1733072736902): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a748d72 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 10276 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-01T17:13:35,861 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-01T17:14:05,862 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b87883a7882d:38737
229 active threads
Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer): State: WAITING Blocked count: 24 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@5c8be0e0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method)
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack:
Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)
Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack:
Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27b4c4d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5293 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 54 Waiting on java.util.concurrent.CountDownLatch$Sync@7dadffb5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13106 Waited count: 13781 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@6909b09a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@689a2b67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1f9f0348): State: TIMED_WAITING Blocked count: 1 Waited count: 1053 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 106 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 37 (qtp9797813-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp9797813-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp9797813-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp9797813-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp9797813-41-acceptor-0@6b1a259b-ServerConnector@39eef6d0{HTTP/1.1, (http/1.1)}{localhost:40833}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp9797813-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp9797813-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp9797813-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-7ee1d391-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 47 Waited count: 3190 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@212971b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44451): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 106 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@39923e56): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 177 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@35eaad8b): State: TIMED_WAITING Blocked count: 0 Waited count: 106 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 176 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 51826 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1440 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4777277e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44451): State: TIMED_WAITING Blocked count: 95 Waited count: 2535 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44451): State: TIMED_WAITING Blocked count: 106 Waited count: 2552 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44451): State: TIMED_WAITING Blocked count: 112 Waited count: 2541 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44451): State: TIMED_WAITING Blocked count: 107 Waited count: 2535 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44451): State: TIMED_WAITING Blocked count: 100 Waited count: 2542 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 
(pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3d8a563f): State: TIMED_WAITING Blocked count: 0 Waited count: 263 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@d1325d8): State: TIMED_WAITING Blocked count: 0 Waited count: 106 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@33c4416c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@69c9e3df): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(253478452)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1860921369-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1860921369-88-acceptor-0@221020f4-ServerConnector@451b769{HTTP/1.1, (http/1.1)}{localhost:43843}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1860921369-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1860921369-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-c4ee99d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@2d1969d0): State: TIMED_WAITING Blocked count: 0 Waited count: 1050 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 45669): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 350 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c6547d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1487 Waited count: 1562 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e2c4b02): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 531 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 528 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 528 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1763905095-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1763905095-122-acceptor-0@4c1095b9-ServerConnector@2eec8e07{HTTP/1.1, (http/1.1)}{localhost:41601}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1763905095-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1763905095-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4d716bcd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (1396781562) connection to localhost/127.0.0.1:44451 from jenkins): State: TIMED_WAITING Blocked count: 1427 Waited count: 1427 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 0 Waited count: 2177 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@238882f1): State: TIMED_WAITING Blocked count: 0 Waited count: 1049 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 38135): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 336 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4450c098 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1468 Waited count: 1566 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@717de3e2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 532 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 550 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 541 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 529 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 539 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp170186225-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp170186225-154-acceptor-0@6ec10c9e-ServerConnector@19f9049{HTTP/1.1, (http/1.1)}{localhost:33229}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp170186225-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp170186225-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-11c75c94-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@33f6f20c): State: TIMED_WAITING Blocked count: 0 Waited count: 1049 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41295): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 327 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69873f16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1425 Waited count: 1573 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7628a0f9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 539 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 544 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 203 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (java.util.concurrent.ThreadPoolExecutor$Worker@6f6a87af[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@37527041[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@201a5c2b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56318): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 262 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 355 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@125e2f8c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:56318):): State: WAITING Blocked count: 0 Waited count: 455 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33e52a59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 486 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57fd6504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 96 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@60852fa1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 24 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:56318)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 57 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46c1f91e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 7 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 23 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6734b74e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@2e85642e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737): State: WAITING Blocked count: 130 Waited count: 573 Waiting on java.util.concurrent.Semaphore$NonfairSync@17245fd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737): State: WAITING Blocked count: 111 Waited count: 436 Waiting on java.util.concurrent.Semaphore$NonfairSync@5e644d21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38737): State: WAITING Blocked count: 89 Waited count: 8314 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c281f64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38737): State: WAITING Blocked count: 0 Waited count: 4 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1989acdb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1989acdb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ce6baf7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5017e70f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@c980156 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@58239b6a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@418ba39b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 314 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 336 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 68 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;b87883a7882d:38737): State: TIMED_WAITING Blocked count: 12 Waited count: 4009 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1111/0x00007f9a20f97948.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 359 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (org.apache.hadoop.hdfs.PeerCache@28d4ea9c): State: TIMED_WAITING Blocked count: 0 Waited count: 174 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 384 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5190 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 401 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 62 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 76 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 164 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa2fac2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51805 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 460 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f0cd76 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 483 (regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69a0c9c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44f94ad9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 
(regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 24 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39e1bbbd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 520 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51664 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 564 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 542 Waiting on java.util.concurrent.ForkJoinPool@57fa42b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 566 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 764 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 580 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-1): State: WAITING Blocked count: 5 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-2): State: WAITING Blocked count: 3 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 997 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 801 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1057 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked 
count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1096 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 116 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dd301d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1205 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1206 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1254 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1258 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1618 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 47 Waiting on java.util.TaskQueue@f9eb9fa Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1980 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1981 (region-location-4): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2176 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8525 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.ForkJoinPool@57fa42b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10272 (AsyncFSWAL-1-hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData-prefix:b87883a7882d,38737,1733072736902): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a748d72 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10276 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-01T17:14:35,862 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T17:15:05,863 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b87883a7882d:38737 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 24 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@5c8be0e0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 32 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27b4c4d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5892 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 60 Waiting on java.util.concurrent.CountDownLatch$Sync@786c893 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13106 Waited count: 13782 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@6909b09a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@689a2b67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1f9f0348): State: TIMED_WAITING Blocked count: 1 Waited count: 1173 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp9797813-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp9797813-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp9797813-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp9797813-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp9797813-41-acceptor-0@6b1a259b-ServerConnector@39eef6d0{HTTP/1.1, (http/1.1)}{localhost:40833}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp9797813-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp9797813-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp9797813-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-7ee1d391-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 47 Waited count: 3190 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@212971b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44451): State: TIMED_WAITING Blocked count: 1 Waited 
count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@39923e56): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 197 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@35eaad8b): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 196 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 57752 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1440 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4777277e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44451): State: TIMED_WAITING Blocked count: 95 Waited count: 2595 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44451): State: TIMED_WAITING Blocked count: 106 Waited count: 2612 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44451): State: TIMED_WAITING Blocked count: 113 Waited count: 2601 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44451): State: TIMED_WAITING Blocked count: 107 Waited count: 2595 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44451): State: TIMED_WAITING Blocked count: 100 Waited count: 2602 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3d8a563f): State: TIMED_WAITING Blocked count: 0 Waited count: 293 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@d1325d8): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@33c4416c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@69c9e3df): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(253478452)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1860921369-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1860921369-88-acceptor-0@221020f4-ServerConnector@451b769{HTTP/1.1, (http/1.1)}{localhost:43843}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1860921369-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1860921369-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-c4ee99d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@2d1969d0): State: TIMED_WAITING Blocked count: 0 Waited count: 1170 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 45669): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 370 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c6547d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1507 Waited count: 1602 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e2c4b02): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 591 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1763905095-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1763905095-122-acceptor-0@4c1095b9-ServerConnector@2eec8e07{HTTP/1.1, (http/1.1)}{localhost:41601}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1763905095-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1763905095-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4d716bcd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (1396781562) connection to localhost/127.0.0.1:44451 from jenkins): State: TIMED_WAITING Blocked count: 1484 Waited count: 1484 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 0 Waited count: 2235 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@238882f1): State: TIMED_WAITING Blocked count: 0 Waited count: 1169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 38135): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 356 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4450c098 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1488 Waited count: 1607 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@717de3e2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 592 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 610 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 601 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 589 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 599 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp170186225-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp170186225-154-acceptor-0@6ec10c9e-ServerConnector@19f9049{HTTP/1.1, (http/1.1)}{localhost:33229}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp170186225-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp170186225-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-11c75c94-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@33f6f20c): State: TIMED_WAITING Blocked count: 0 Waited count: 1169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41295): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69873f16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1445 Waited count: 1613 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7628a0f9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 599 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 604 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 203 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (java.util.concurrent.ThreadPoolExecutor$Worker@6f6a87af[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@37527041[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@201a5c2b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56318): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 292 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 359 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@125e2f8c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:56318):): State: WAITING Blocked count: 0 Waited count: 459 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33e52a59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 490 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57fd6504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@60852fa1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 376 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 24 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:56318)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 57 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46c1f91e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 7 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 23 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6734b74e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e85642e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737): State: WAITING Blocked count: 130 Waited count: 573 Waiting on java.util.concurrent.Semaphore$NonfairSync@17245fd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737): State: WAITING Blocked count: 111 Waited count: 436 Waiting on java.util.concurrent.Semaphore$NonfairSync@5e644d21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38737): State: WAITING Blocked count: 89 Waited count: 8314 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c281f64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38737): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1989acdb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1989acdb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ce6baf7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5017e70f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@c980156 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@58239b6a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@418ba39b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 314 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 336 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 68 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;b87883a7882d:38737): State: TIMED_WAITING Blocked count: 12 Waited count: 4009 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1111/0x00007f9a20f97948.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 359 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (org.apache.hadoop.hdfs.PeerCache@28d4ea9c): State: TIMED_WAITING Blocked count: 0 Waited count: 194 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 384 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5790 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 401 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 62 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 76 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 164 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa2fac2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57807 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 460 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f0cd76 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 483 
(regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69a0c9c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44f94ad9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 (regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 24 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39e1bbbd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 520 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57667 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 564 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 542 Waiting on java.util.concurrent.ForkJoinPool@57fa42b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 580 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-1): State: WAITING Blocked count: 5 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-2): State: WAITING Blocked count: 3 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 997 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 807 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1057 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1096 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 116 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dd301d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1205 
(RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1206 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1254 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1258 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1618 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 47 Waiting on java.util.TaskQueue@f9eb9fa Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1980 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1981 (region-location-4): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2176 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8525 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10272 (AsyncFSWAL-1-hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData-prefix:b87883a7882d,38737,1733072736902): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a748d72 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10276 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING 
Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 10277 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
2024-12-01T17:15:35,863 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-01T17:15:38,101 DEBUG [master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=28, reuseRatio=73.68% 
2024-12-01T17:15:38,101 DEBUG [master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 
2024-12-01T17:15:46,184 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 
2024-12-01T17:16:05,863 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b87883a7882d:38737 228 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 24 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@5c8be0e0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27b4c4d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6492 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 66 Waiting on java.util.concurrent.CountDownLatch$Sync@c00cfdc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13107 Waited count: 13783 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@6909b09a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@689a2b67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1f9f0348): State: TIMED_WAITING Blocked count: 1 Waited count: 1293 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp9797813-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp9797813-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp9797813-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp9797813-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp9797813-41-acceptor-0@6b1a259b-ServerConnector@39eef6d0{HTTP/1.1, (http/1.1)}{localhost:40833}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp9797813-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp9797813-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp9797813-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-7ee1d391-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 47 Waited count: 3190 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@212971b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44451): State: TIMED_WAITING Blocked count: 1 Waited 
count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@39923e56): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 217 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@35eaad8b): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 216 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 63681 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1440 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4777277e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44451): State: TIMED_WAITING Blocked count: 95 Waited count: 2655 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44451): State: TIMED_WAITING Blocked count: 106 Waited count: 2673 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44451): State: TIMED_WAITING Blocked count: 113 Waited count: 2661 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44451): State: TIMED_WAITING Blocked count: 107 Waited count: 2655 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44451): State: TIMED_WAITING Blocked count: 100 Waited count: 2662 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3d8a563f): State: TIMED_WAITING Blocked count: 0 Waited count: 323 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@d1325d8): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@33c4416c): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@69c9e3df): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(253478452)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1860921369-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1860921369-88-acceptor-0@221020f4-ServerConnector@451b769{HTTP/1.1, (http/1.1)}{localhost:43843}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1860921369-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1860921369-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-c4ee99d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@2d1969d0): State: TIMED_WAITING Blocked count: 0 Waited count: 1290 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 45669): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 390 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c6547d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1527 Waited count: 1642 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e2c4b02): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 651 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 646 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 645 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 648 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 648 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1763905095-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1763905095-122-acceptor-0@4c1095b9-ServerConnector@2eec8e07{HTTP/1.1, (http/1.1)}{localhost:41601}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1763905095-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1763905095-124): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4d716bcd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (1396781562) connection to localhost/127.0.0.1:44451 from jenkins): State: TIMED_WAITING Blocked count: 1530 Waited count: 1530 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 0 Waited count: 2295 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@238882f1): State: TIMED_WAITING Blocked count: 0 Waited count: 1289 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 38135): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 376 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4450c098 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1508 Waited count: 1647 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@717de3e2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 652 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 670 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 661 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 649 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 659 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp170186225-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp170186225-154-acceptor-0@6ec10c9e-ServerConnector@19f9049{HTTP/1.1, (http/1.1)}{localhost:33229}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp170186225-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp170186225-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-11c75c94-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@33f6f20c): State: TIMED_WAITING Blocked count: 0 Waited count: 1289 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41295): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69873f16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1465 Waited count: 1653 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7628a0f9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 645 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 659 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 664 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 203 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d9b9504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58f5cf70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (java.util.concurrent.ThreadPoolExecutor$Worker@6f6a87af[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@37527041[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60d08584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@201a5c2b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56318): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 322 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 363 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@125e2f8c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:56318):): State: WAITING Blocked count: 0 Waited count: 463 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33e52a59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 494 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57fd6504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@60852fa1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 407 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 24 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:56318)): State: RUNNABLE 
Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 57 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46c1f91e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 7 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 23 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6734b74e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e85642e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737): State: WAITING Blocked count: 130 Waited count: 573 Waiting on java.util.concurrent.Semaphore$NonfairSync@17245fd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737): State: WAITING Blocked count: 111 Waited count: 436 Waiting on java.util.concurrent.Semaphore$NonfairSync@5e644d21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38737): State: WAITING Blocked count: 89 Waited count: 8314 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c281f64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38737): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1989acdb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1989acdb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ce6baf7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5017e70f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38737): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@c980156 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@58239b6a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@418ba39b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 314 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 336 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 68 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;b87883a7882d:38737): State: TIMED_WAITING Blocked count: 12 Waited count: 4009 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1111/0x00007f9a20f97948.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 359 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (org.apache.hadoop.hdfs.PeerCache@28d4ea9c): State: TIMED_WAITING Blocked count: 0 Waited count: 214 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 384 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 
Waited count: 6389 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 401 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 62 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 76 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 164 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa2fac2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63810 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 460 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f0cd76 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 483 (regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69a0c9c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44f94ad9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 (regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 24 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39e1bbbd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 520 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63669 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 564 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 543 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 580 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-1): State: WAITING Blocked count: 5 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-2): State: WAITING Blocked count: 3 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 997 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 813 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1057 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1096 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 116 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dd301d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1205 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1206 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1254 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1258 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1618 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 47 Waiting on java.util.TaskQueue@f9eb9fa Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1980 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1981 (region-location-4): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2176 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10272 (AsyncFSWAL-1-hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData-prefix:b87883a7882d,38737,1733072736902): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a748d72 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10277 (process reaper): State: TIMED_WAITING Blocked count: 1 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10282 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-01T17:16:35,864 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T17:17:05,864 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T17:17:18,029 DEBUG [M:0;b87883a7882d:38737 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733073138015Disabling compacts and flushes for region at 1733073138015Disabling writes for close at 1733073138026 (+11 ms)Obtaining lock to block concurrent updates at 1733073138026Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733073138026Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1015760, getHeapSize=1218384, getOffHeapSize=0, getCellsCount=2672 at 1733073138027 (+1 ms)Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1733073438028 (+300001 ms) 2024-12-01T17:17:18,029 WARN [M:0;b87883a7882d:38737 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4593, WAL system stuck? 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4593, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-12-01T17:17:18,033 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T17:17:18,036 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-01T17:17:18,036 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-01T17:17:18,036 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/WALs/b87883a7882d,38737,1733072736902/b87883a7882d%2C38737%2C1733072736902.1733072738878 2024-12-01T17:17:18,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/WALs/b87883a7882d,38737,1733072736902/b87883a7882d%2C38737%2C1733072736902.1733072738878 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T17:17:18,042 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T17:17:18,042 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/WALs/b87883a7882d,38737,1733072736902/b87883a7882d%2C38737%2C1733072736902.1733072738878 2024-12-01T17:17:18,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/WALs/b87883a7882d,38737,1733072736902/b87883a7882d%2C38737%2C1733072736902.1733072738878 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b87883a7882d:38737 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 24 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@5c8be0e0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27b4c4d8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 35 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7091 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 72 Waiting on java.util.concurrent.CountDownLatch$Sync@2fb158b5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) 
app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13107 Waited count: 13784 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@6909b09a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@689a2b67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1f9f0348): State: TIMED_WAITING Blocked count: 1 Waited count: 1413 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp9797813-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 
(qtp9797813-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp9797813-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp9797813-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp9797813-41-acceptor-0@6b1a259b-ServerConnector@39eef6d0{HTTP/1.1, (http/1.1)}{localhost:40833}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp9797813-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp9797813-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp9797813-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-7ee1d391-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 47 Waited count: 3190 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@212971b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44451): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@39923e56): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 237 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@35eaad8b): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 236 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 69610 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1440 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4777277e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44451): State: TIMED_WAITING Blocked count: 100 Waited count: 2715 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44451): State: TIMED_WAITING Blocked count: 106 Waited count: 2733 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44451): State: TIMED_WAITING Blocked count: 113 Waited count: 2721 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44451): State: TIMED_WAITING Blocked count: 108 Waited count: 2715 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44451): State: TIMED_WAITING Blocked count: 100 Waited count: 2722 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3d8a563f): State: TIMED_WAITING Blocked count: 0 Waited count: 353 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@d1325d8): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@33c4416c): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@69c9e3df): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(253478452)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1860921369-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1860921369-88-acceptor-0@221020f4-ServerConnector@451b769{HTTP/1.1, (http/1.1)}{localhost:43843}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1860921369-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1860921369-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-c4ee99d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@2d1969d0): State: TIMED_WAITING Blocked count: 0 Waited count: 1410 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 45669): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 410 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c6547d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1548 Waited count: 1683 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e2c4b02): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 711 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 706 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 705 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 45669): State: TIMED_WAITING Blocked count: 0 Waited count: 708 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1763905095-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1763905095-122-acceptor-0@4c1095b9-ServerConnector@2eec8e07{HTTP/1.1, (http/1.1)}{localhost:41601}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1763905095-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1763905095-124): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4d716bcd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (1396781562) connection to localhost/127.0.0.1:44451 from jenkins): State: TIMED_WAITING Blocked count: 1581 Waited count: 1581 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 0 Waited count: 2348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@238882f1): State: TIMED_WAITING Blocked count: 0 Waited count: 1409 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 38135): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 396 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4450c098 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1528 Waited count: 1693 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@717de3e2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 712 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 730 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 721 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 38135): State: TIMED_WAITING Blocked count: 0 Waited count: 719 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp170186225-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f9a2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp170186225-154-acceptor-0@6ec10c9e-ServerConnector@19f9049{HTTP/1.1, (http/1.1)}{localhost:33229}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp170186225-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp170186225-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-11c75c94-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@33f6f20c): State: TIMED_WAITING Blocked count: 0 Waited count: 1409 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41295): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 387 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69873f16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451): State: TIMED_WAITING Blocked count: 1485 Waited count: 1693 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7628a0f9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 705 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC 
Server handler 1 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 719 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 724 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41295): State: TIMED_WAITING Blocked count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 203 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5/current/BP-371634539-172.17.0.3-1733072730982): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d9b9504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58f5cf70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (java.util.concurrent.ThreadPoolExecutor$Worker@6f6a87af[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@37527041[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60d08584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@201a5c2b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:56318): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 71 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 352 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 368 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@125e2f8c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:56318):): State: WAITING Blocked count: 0 Waited count: 468 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33e52a59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 499 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57fd6504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@60852fa1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 435 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 24 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:56318)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 57 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46c1f91e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 7 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 23 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6734b74e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 96 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 
(NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c4421a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e85642e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38737): State: WAITING Blocked count: 130 Waited count: 573 Waiting on java.util.concurrent.Semaphore$NonfairSync@17245fd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737): State: WAITING Blocked count: 111 Waited count: 436 Waiting on java.util.concurrent.Semaphore$NonfairSync@5e644d21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38737): State: WAITING Blocked count: 89 Waited count: 8314 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c281f64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38737): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1989acdb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1989acdb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ce6baf7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5017e70f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@c980156 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38737): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@58239b6a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@418ba39b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 314 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 336 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 68 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;b87883a7882d:38737): State: TIMED_WAITING Blocked count: 12 Waited count: 4010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1433/0x00007f9a21238d78.run(Unknown Source) 
app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 71 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/b87883a7882d:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (org.apache.hadoop.hdfs.PeerCache@28d4ea9c): State: TIMED_WAITING Blocked count: 0 Waited count: 234 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 384 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6988 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 401 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 62 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 76 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 164 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3aa2fac2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
426 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69812 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 460 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f0cd76 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 483 (regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@69a0c9c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44f94ad9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 (regionserver/b87883a7882d:0.procedureResultReporter): State: WAITING Blocked count: 24 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39e1bbbd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 520 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69671 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 580 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-1): State: WAITING Blocked count: 5 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-2): State: WAITING Blocked count: 3 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 997 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1057 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1096 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 116 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dd301d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1205 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1206 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1254 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1258 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1618 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 47 Waiting on java.util.TaskQueue@f9eb9fa Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1980 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1981 (region-location-4): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38fb4a8a 
Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2176 (RPCClient-NioEventLoopGroup-6-14):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 10272 (AsyncFSWAL-1-hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData-prefix:b87883a7882d,38737,1733072736902):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a748d72
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 10277 (process reaper):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 10282 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 10285 (WAL-Shutdown-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 10286 (Close-WAL-Writer-0):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1423/0x00007f9a21234ad0.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-01T17:17:22,043 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/WALs/b87883a7882d,38737,1733072736902/b87883a7882d%2C38737%2C1733072736902.1733072738878 after 4001ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T17:17:23,033 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-12-01T17:17:23,034 INFO  [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-01T17:17:23,034 INFO  [M:0;b87883a7882d:38737 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-01T17:17:23,035 INFO  [M:0;b87883a7882d:38737 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38737
2024-12-01T17:17:23,035 INFO  [M:0;b87883a7882d:38737 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-01T17:17:23,046 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/b53cabd4-726b-d527-8dc7-f40f64f13546/MasterData/WALs/b87883a7882d,38737,1733072736902/b87883a7882d%2C38737%2C1733072736902.1733072738878
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 12 more
2024-12-01T17:17:23,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-01T17:17:23,204 INFO  [M:0;b87883a7882d:38737 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-01T17:17:23,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38737-0x10191db4a6c0000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-01T17:17:23,246 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5bae1b2c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T17:17:23,247 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@19f9049{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T17:17:23,247 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T17:17:23,247 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7aab5215{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T17:17:23,247 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2bd013ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,STOPPED}
2024-12-01T17:17:23,249 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T17:17:23,250 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T17:17:23,250 WARN  [BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T17:17:23,250 WARN  [BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-371634539-172.17.0.3-1733072730982 (Datanode Uuid b8da1b8e-1d50-48ab-8b09-d6e13300af61) service to localhost/127.0.0.1:44451
2024-12-01T17:17:23,253 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data5/current/BP-371634539-172.17.0.3-1733072730982 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T17:17:23,253 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data6/current/BP-371634539-172.17.0.3-1733072730982 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T17:17:23,254 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T17:17:23,256 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1585fad{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T17:17:23,257 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2eec8e07{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T17:17:23,257 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T17:17:23,257 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5614c0c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T17:17:23,257 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c7367e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,STOPPED}
2024-12-01T17:17:23,258 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T17:17:23,258 WARN  [BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T17:17:23,258 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T17:17:23,258 WARN  [BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-371634539-172.17.0.3-1733072730982 (Datanode Uuid a892d38a-cd18-41ed-bb98-de9c143d2f76) service to localhost/127.0.0.1:44451
2024-12-01T17:17:23,259 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data3/current/BP-371634539-172.17.0.3-1733072730982 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T17:17:23,259 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data4/current/BP-371634539-172.17.0.3-1733072730982 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T17:17:23,259 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T17:17:23,261 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f198833{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T17:17:23,262 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@451b769{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T17:17:23,262 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T17:17:23,262 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@608fba0d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T17:17:23,262 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6887c008{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,STOPPED}
2024-12-01T17:17:23,263 WARN  [BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T17:17:23,263 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T17:17:23,263 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T17:17:23,263 WARN  [BP-371634539-172.17.0.3-1733072730982 heartbeating to localhost/127.0.0.1:44451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-371634539-172.17.0.3-1733072730982 (Datanode Uuid 97eaf1fc-87b0-40ad-a118-b65f75a1b831) service to localhost/127.0.0.1:44451
2024-12-01T17:17:23,264 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data1/current/BP-371634539-172.17.0.3-1733072730982 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T17:17:23,264 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/cluster_95c05480-bb9c-bb0e-e344-a8eb85018e74/data/data2/current/BP-371634539-172.17.0.3-1733072730982 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T17:17:23,264 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T17:17:23,270 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@79cacdbc{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-01T17:17:23,271 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39eef6d0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T17:17:23,271 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T17:17:23,271 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e7cf71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T17:17:23,271 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37667246{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/57fa8cb8-f102-0f2a-f11f-e88fe106749f/hadoop.log.dir/,STOPPED}
2024-12-01T17:17:23,282 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-01T17:17:23,448 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down